Diffstat (limited to 'lib/ansible/galaxy')
77 files changed, 7123 insertions, 0 deletions
diff --git a/lib/ansible/galaxy/__init__.py b/lib/ansible/galaxy/__init__.py new file mode 100644 index 0000000..d3b9035 --- /dev/null +++ b/lib/ansible/galaxy/__init__.py @@ -0,0 +1,72 @@ +######################################################################## +# +# (C) 2015, Brian Coca <bcoca@ansible.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# +######################################################################## +''' This manages remote shared Ansible objects, mainly roles''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +import ansible.constants as C +from ansible import context +from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.yaml import yaml_load + +# default_readme_template +# default_meta_template + + +def get_collections_galaxy_meta_info(): + meta_path = os.path.join(os.path.dirname(__file__), 'data', 'collections_galaxy_meta.yml') + with open(to_bytes(meta_path, errors='surrogate_or_strict'), 'rb') as galaxy_obj: + return yaml_load(galaxy_obj) + + +class Galaxy(object): + ''' Keeps global galaxy info ''' + + def __init__(self): + # TODO: eventually remove this as it contains a mismash of properties that aren't really global + + # roles_path needs to be a list and will be by default + roles_path = context.CLIARGS.get('roles_path', C.DEFAULT_ROLES_PATH) + # cli option handling is responsible for splitting roles_path + self.roles_paths = roles_path + + self.roles = {} + + # load data path for resource usage + this_dir, this_filename = os.path.split(__file__) + type_path = context.CLIARGS.get('role_type', 'default') + if type_path == 'default': + type_path = os.path.join(type_path, context.CLIARGS.get('type')) + + self.DATA_PATH = os.path.join(this_dir, 'data', type_path) + + @property + def default_role_skeleton_path(self): + return self.DATA_PATH + + def add_role(self, role): + self.roles[role.name] = role + + def remove_role(self, role_name): + del self.roles[role_name] diff --git a/lib/ansible/galaxy/api.py b/lib/ansible/galaxy/api.py new file mode 100644 index 0000000..8dea804 --- /dev/null +++ b/lib/ansible/galaxy/api.py @@ -0,0 +1,913 @@ +# (C) 2013, James Cammarata <jcammarata@ansible.com> +# Copyright: (c) 2019, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import collections +import datetime +import functools +import hashlib +import json +import os +import stat +import tarfile +import time +import threading + +from urllib.error import HTTPError +from urllib.parse import quote as urlquote, urlencode, urlparse, parse_qs, urljoin + +from ansible import constants as C +from ansible.errors import AnsibleError +from ansible.galaxy.user_agent import user_agent +from ansible.module_utils.api import 
retry_with_delays_and_condition +from ansible.module_utils.api import generate_jittered_backoff +from ansible.module_utils.six import string_types +from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.module_utils.urls import open_url, prepare_multipart +from ansible.utils.display import Display +from ansible.utils.hashing import secure_hash_s +from ansible.utils.path import makedirs_safe + +display = Display() +_CACHE_LOCK = threading.Lock() +COLLECTION_PAGE_SIZE = 100 +RETRY_HTTP_ERROR_CODES = [ # TODO: Allow user-configuration + 429, # Too Many Requests + 520, # Galaxy rate limit error code (Cloudflare unknown error) +] + + +def cache_lock(func): + def wrapped(*args, **kwargs): + with _CACHE_LOCK: + return func(*args, **kwargs) + + return wrapped + + +def is_rate_limit_exception(exception): + # Note: cloud.redhat.com masks rate limit errors with 403 (Forbidden) error codes. + # Since 403 could reflect the actual problem (such as an expired token), we should + # not retry by default. + return isinstance(exception, GalaxyError) and exception.http_code in RETRY_HTTP_ERROR_CODES + + +def g_connect(versions): + """ + Wrapper to lazily initialize connection info to Galaxy and verify the API versions required are available on the + endpoint. + + :param versions: A list of API versions that the function supports. + """ + def decorator(method): + def wrapped(self, *args, **kwargs): + if not self._available_api_versions: + display.vvvv("Initial connection to galaxy_server: %s" % self.api_server) + + # Determine the type of Galaxy server we are talking to. First try it unauthenticated then with Bearer + # auth for Automation Hub. + n_url = self.api_server + error_context_msg = 'Error when finding available api versions from %s (%s)' % (self.name, n_url) + + if self.api_server == 'https://galaxy.ansible.com' or self.api_server == 'https://galaxy.ansible.com/': + n_url = 'https://galaxy.ansible.com/api/' + + try: + data = self._call_galaxy(n_url, method='GET', error_context_msg=error_context_msg, cache=True) + except (AnsibleError, GalaxyError, ValueError, KeyError) as err: + # Either the URL doesnt exist, or other error. Or the URL exists, but isn't a galaxy API + # root (not JSON, no 'available_versions') so try appending '/api/' + if n_url.endswith('/api') or n_url.endswith('/api/'): + raise + + # Let exceptions here bubble up but raise the original if this returns a 404 (/api/ wasn't found). + n_url = _urljoin(n_url, '/api/') + try: + data = self._call_galaxy(n_url, method='GET', error_context_msg=error_context_msg, cache=True) + except GalaxyError as new_err: + if new_err.http_code == 404: + raise err + raise + + if 'available_versions' not in data: + raise AnsibleError("Tried to find galaxy API root at %s but no 'available_versions' are available " + "on %s" % (n_url, self.api_server)) + + # Update api_server to point to the "real" API root, which in this case could have been the configured + # url + '/api/' appended. + self.api_server = n_url + + # Default to only supporting v1, if only v1 is returned we also assume that v2 is available even though + # it isn't returned in the available_versions dict. 
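# For illustration only (shape inferred from this handling code, not a published schema),
# the probed /api/ root is expected to answer with something like:
#     {"available_versions": {"v1": "v1/", "v2": "v2/"}}   # galaxy.ansible.com
#     {"available_versions": {"v3": "v3/"}}                # an Automation Hub style server
# A v1-only answer gets 'v2' assumed by the fallback below.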
+ available_versions = data.get('available_versions', {u'v1': u'v1/'}) + if list(available_versions.keys()) == [u'v1']: + available_versions[u'v2'] = u'v2/' + + self._available_api_versions = available_versions + display.vvvv("Found API version '%s' with Galaxy server %s (%s)" + % (', '.join(available_versions.keys()), self.name, self.api_server)) + + # Verify that the API versions the function works with are available on the server specified. + available_versions = set(self._available_api_versions.keys()) + common_versions = set(versions).intersection(available_versions) + if not common_versions: + raise AnsibleError("Galaxy action %s requires API versions '%s' but only '%s' are available on %s %s" + % (method.__name__, ", ".join(versions), ", ".join(available_versions), + self.name, self.api_server)) + + return method(self, *args, **kwargs) + return wrapped + return decorator + + +def get_cache_id(url): + """ Gets the cache ID for the URL specified. """ + url_info = urlparse(url) + + port = None + try: + port = url_info.port + except ValueError: + pass # While the URL is probably invalid, let the caller figure that out when using it + + # Cannot use netloc because it could contain credentials if the server specified had them in there. + return '%s:%s' % (url_info.hostname, port or '') + + +@cache_lock +def _load_cache(b_cache_path): + """ Loads the cache file requested if possible. The file must not be world writable. """ + cache_version = 1 + + if not os.path.isfile(b_cache_path): + display.vvvv("Creating Galaxy API response cache file at '%s'" % to_text(b_cache_path)) + with open(b_cache_path, 'w'): + os.chmod(b_cache_path, 0o600) + + cache_mode = os.stat(b_cache_path).st_mode + if cache_mode & stat.S_IWOTH: + display.warning("Galaxy cache has world writable access (%s), ignoring it as a cache source." + % to_text(b_cache_path)) + return + + with open(b_cache_path, mode='rb') as fd: + json_val = to_text(fd.read(), errors='surrogate_or_strict') + + try: + cache = json.loads(json_val) + except ValueError: + cache = None + + if not isinstance(cache, dict) or cache.get('version', None) != cache_version: + display.vvvv("Galaxy cache file at '%s' has an invalid version, clearing" % to_text(b_cache_path)) + cache = {'version': cache_version} + + # Set the cache after we've cleared the existing entries + with open(b_cache_path, mode='wb') as fd: + fd.write(to_bytes(json.dumps(cache), errors='surrogate_or_strict')) + + return cache + + +def _urljoin(*args): + return '/'.join(to_native(a, errors='surrogate_or_strict').strip('/') for a in args + ('',) if a) + + +class GalaxyError(AnsibleError): + """ Error for bad Galaxy server responses. """ + + def __init__(self, http_error, message): + super(GalaxyError, self).__init__(message) + self.http_code = http_error.code + self.url = http_error.geturl() + + try: + http_msg = to_text(http_error.read()) + err_info = json.loads(http_msg) + except (AttributeError, ValueError): + err_info = {} + + url_split = self.url.split('/') + if 'v2' in url_split: + galaxy_msg = err_info.get('message', http_error.reason) + code = err_info.get('code', 'Unknown') + full_error_msg = u"%s (HTTP Code: %d, Message: %s Code: %s)" % (message, self.http_code, galaxy_msg, code) + elif 'v3' in url_split: + errors = err_info.get('errors', []) + if not errors: + errors = [{}] # Defaults are set below, we just need to make sure 1 error is present. 
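# For illustration only (shapes inferred from the parsing in this method, not a
# documented schema), a v2 error body looks roughly like
#     {"code": "conflict.collection_exists", "message": "Collection already exists"}
# while a v3 body nests one or more errors:
#     {"errors": [{"code": "not_found", "detail": "...", "title": "Not found."}]}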
+ + message_lines = [] + for error in errors: + error_msg = error.get('detail') or error.get('title') or http_error.reason + error_code = error.get('code') or 'Unknown' + message_line = u"(HTTP Code: %d, Message: %s Code: %s)" % (self.http_code, error_msg, error_code) + message_lines.append(message_line) + + full_error_msg = "%s %s" % (message, ', '.join(message_lines)) + else: + # v1 and unknown API endpoints + galaxy_msg = err_info.get('default', http_error.reason) + full_error_msg = u"%s (HTTP Code: %d, Message: %s)" % (message, self.http_code, galaxy_msg) + + self.message = to_native(full_error_msg) + + +# Keep the raw string results for the date. It's too complex to parse as a datetime object and the various APIs return +# them in different formats. +CollectionMetadata = collections.namedtuple('CollectionMetadata', ['namespace', 'name', 'created_str', 'modified_str']) + + +class CollectionVersionMetadata: + + def __init__(self, namespace, name, version, download_url, artifact_sha256, dependencies, signatures_url, signatures): + """ + Contains common information about a collection on a Galaxy server to smooth through API differences for + Collection and define a standard meta info for a collection. + + :param namespace: The namespace name. + :param name: The collection name. + :param version: The version that the metadata refers to. + :param download_url: The URL to download the collection. + :param artifact_sha256: The SHA256 of the collection artifact for later verification. + :param dependencies: A dict of dependencies of the collection. + :param signatures_url: The URL to the specific version of the collection. + :param signatures: The list of signatures found at the signatures_url. + """ + self.namespace = namespace + self.name = name + self.version = version + self.download_url = download_url + self.artifact_sha256 = artifact_sha256 + self.dependencies = dependencies + self.signatures_url = signatures_url + self.signatures = signatures + + +@functools.total_ordering +class GalaxyAPI: + """ This class is meant to be used as a API client for an Ansible Galaxy server """ + + def __init__( + self, galaxy, name, url, + username=None, password=None, token=None, validate_certs=True, + available_api_versions=None, + clear_response_cache=False, no_cache=True, + priority=float('inf'), + timeout=60, + ): + self.galaxy = galaxy + self.name = name + self.username = username + self.password = password + self.token = token + self.api_server = url + self.validate_certs = validate_certs + self.timeout = timeout + self._available_api_versions = available_api_versions or {} + self._priority = priority + self._server_timeout = timeout + + b_cache_dir = to_bytes(C.GALAXY_CACHE_DIR, errors='surrogate_or_strict') + makedirs_safe(b_cache_dir, mode=0o700) + self._b_cache_path = os.path.join(b_cache_dir, b'api.json') + + if clear_response_cache: + with _CACHE_LOCK: + if os.path.exists(self._b_cache_path): + display.vvvv("Clearing cache file (%s)" % to_text(self._b_cache_path)) + os.remove(self._b_cache_path) + + self._cache = None + if not no_cache: + self._cache = _load_cache(self._b_cache_path) + + display.debug('Validate TLS certificates for %s: %s' % (self.api_server, self.validate_certs)) + + def __str__(self): + # type: (GalaxyAPI) -> str + """Render GalaxyAPI as a native string representation.""" + return to_native(self.name) + + def __unicode__(self): + # type: (GalaxyAPI) -> str + """Render GalaxyAPI as a unicode/text string representation.""" + return to_text(self.name) + + def __repr__(self): + 
# type: (GalaxyAPI) -> str + """Render GalaxyAPI as an inspectable string representation.""" + return ( + '<{instance!s} "{name!s}" @ {url!s} with priority {priority!s}>'. + format( + instance=self, name=self.name, + priority=self._priority, url=self.api_server, + ) + ) + + def __lt__(self, other_galaxy_api): + # type: (GalaxyAPI, GalaxyAPI) -> bool + """Return whether the instance priority is higher than other.""" + if not isinstance(other_galaxy_api, self.__class__): + return NotImplemented + + return ( + self._priority > other_galaxy_api._priority or + self.name < other_galaxy_api.name + ) + + @property # type: ignore[misc] # https://github.com/python/mypy/issues/1362 + @g_connect(['v1', 'v2', 'v3']) + def available_api_versions(self): + # Calling g_connect will populate self._available_api_versions + return self._available_api_versions + + @retry_with_delays_and_condition( + backoff_iterator=generate_jittered_backoff(retries=6, delay_base=2, delay_threshold=40), + should_retry_error=is_rate_limit_exception + ) + def _call_galaxy(self, url, args=None, headers=None, method=None, auth_required=False, error_context_msg=None, + cache=False, cache_key=None): + url_info = urlparse(url) + cache_id = get_cache_id(url) + if not cache_key: + cache_key = url_info.path + query = parse_qs(url_info.query) + if cache and self._cache: + server_cache = self._cache.setdefault(cache_id, {}) + iso_datetime_format = '%Y-%m-%dT%H:%M:%SZ' + + valid = False + if cache_key in server_cache: + expires = datetime.datetime.strptime(server_cache[cache_key]['expires'], iso_datetime_format) + valid = datetime.datetime.utcnow() < expires + + is_paginated_url = 'page' in query or 'offset' in query + if valid and not is_paginated_url: + # Got a hit on the cache and we aren't getting a paginated response + path_cache = server_cache[cache_key] + if path_cache.get('paginated'): + if '/v3/' in cache_key: + res = {'links': {'next': None}} + else: + res = {'next': None} + + # Technically some v3 paginated APIs return in 'data' but the caller checks the keys for this so + # always returning the cache under results is fine. + res['results'] = [] + for result in path_cache['results']: + res['results'].append(result) + + else: + res = path_cache['results'] + + return res + + elif not is_paginated_url: + # The cache entry had expired or does not exist, start a new blank entry to be filled later. + expires = datetime.datetime.utcnow() + expires += datetime.timedelta(days=1) + server_cache[cache_key] = { + 'expires': expires.strftime(iso_datetime_format), + 'paginated': False, + } + + headers = headers or {} + self._add_auth_token(headers, url, required=auth_required) + + try: + display.vvvv("Calling Galaxy at %s" % url) + resp = open_url(to_native(url), data=args, validate_certs=self.validate_certs, headers=headers, + method=method, timeout=self._server_timeout, http_agent=user_agent(), follow_redirects='safe') + except HTTPError as e: + raise GalaxyError(e, error_context_msg) + except Exception as e: + raise AnsibleError("Unknown error when attempting to call Galaxy at '%s': %s" % (url, to_native(e))) + + resp_data = to_text(resp.read(), errors='surrogate_or_strict') + try: + data = json.loads(resp_data) + except ValueError: + raise AnsibleError("Failed to parse Galaxy response from '%s' as JSON:\n%s" + % (resp.url, to_native(resp_data))) + + if cache and self._cache: + path_cache = self._cache[cache_id][cache_key] + + # v3 can return data or results for paginated results. Scan the result so we can determine what to cache.
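# Pieced together from _load_cache() and the read/write logic in this method, the
# on-disk api.json cache ends up shaped roughly like this (illustrative, not a
# stable or documented format):
#     {"version": 1,
#      "galaxy.ansible.com:": {                        # get_cache_id() -> "host:port"
#          "/api/v2/collections/ns/name/versions/": {
#              "expires": "2023-01-02T03:04:05Z",      # utcnow() + 1 day
#              "paginated": true,
#              "results": [{"version": "1.0.0"}]}}}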
+ paginated_key = None + for key in ['data', 'results']: + if key in data: + paginated_key = key + break + + if paginated_key: + path_cache['paginated'] = True + results = path_cache.setdefault('results', []) + for result in data[paginated_key]: + results.append(result) + + else: + path_cache['results'] = data + + return data + + def _add_auth_token(self, headers, url, token_type=None, required=False): + # Don't add the auth token if one is already present + if 'Authorization' in headers: + return + + if not self.token and required: + raise AnsibleError("No access token or username set. A token can be set with --api-key " + "or at {0}.".format(to_native(C.GALAXY_TOKEN_PATH))) + + if self.token: + headers.update(self.token.headers()) + + @cache_lock + def _set_cache(self): + with open(self._b_cache_path, mode='wb') as fd: + fd.write(to_bytes(json.dumps(self._cache), errors='surrogate_or_strict')) + + @g_connect(['v1']) + def authenticate(self, github_token): + """ + Retrieve an authentication token + """ + url = _urljoin(self.api_server, self.available_api_versions['v1'], "tokens") + '/' + args = urlencode({"github_token": github_token}) + + try: + resp = open_url(url, data=args, validate_certs=self.validate_certs, method="POST", http_agent=user_agent(), timeout=self._server_timeout) + except HTTPError as e: + raise GalaxyError(e, 'Attempting to authenticate to galaxy') + except Exception as e: + raise AnsibleError('Unable to authenticate to galaxy: %s' % to_native(e), orig_exc=e) + + data = json.loads(to_text(resp.read(), errors='surrogate_or_strict')) + return data + + @g_connect(['v1']) + def create_import_task(self, github_user, github_repo, reference=None, role_name=None): + """ + Post an import request + """ + url = _urljoin(self.api_server, self.available_api_versions['v1'], "imports") + '/' + args = { + "github_user": github_user, + "github_repo": github_repo, + "github_reference": reference if reference else "" + } + if role_name: + args['alternate_role_name'] = role_name + elif github_repo.startswith('ansible-role'): + args['alternate_role_name'] = github_repo[len('ansible-role') + 1:] + data = self._call_galaxy(url, args=urlencode(args), method="POST") + if data.get('results', None): + return data['results'] + return data + + @g_connect(['v1']) + def get_import_task(self, task_id=None, github_user=None, github_repo=None): + """ + Check the status of an import task. + """ + url = _urljoin(self.api_server, self.available_api_versions['v1'], "imports") + if task_id is not None: + url = "%s?id=%d" % (url, task_id) + elif github_user is not None and github_repo is not None: + url = "%s?github_user=%s&github_repo=%s" % (url, github_user, github_repo) + else: + raise AnsibleError("Expected task_id or github_user and github_repo") + + data = self._call_galaxy(url) + return data['results'] + + @g_connect(['v1']) + def lookup_role_by_name(self, role_name, notify=True): + """ + Find a role by name. + """ + role_name = to_text(urlquote(to_bytes(role_name))) + + try: + parts = role_name.split(".") + user_name = ".".join(parts[0:-1]) + role_name = parts[-1] + if notify: + display.display("- downloading role '%s', owned by %s" % (role_name, user_name)) + except Exception: + raise AnsibleError("Invalid role name (%s). 
Specify role as format: username.rolename" % role_name) + + url = _urljoin(self.api_server, self.available_api_versions['v1'], "roles", + "?owner__username=%s&name=%s" % (user_name, role_name)) + data = self._call_galaxy(url) + if len(data["results"]) != 0: + return data["results"][0] + return None + + @g_connect(['v1']) + def fetch_role_related(self, related, role_id): + """ + Fetch the list of related items for the given role. + The url comes from the 'related' field of the role. + """ + + results = [] + try: + url = _urljoin(self.api_server, self.available_api_versions['v1'], "roles", role_id, related, + "?page_size=50") + data = self._call_galaxy(url) + results = data['results'] + done = (data.get('next_link', None) is None) + + # https://github.com/ansible/ansible/issues/64355 + # api_server contains part of the API path but next_link includes the /api part so strip it out. + url_info = urlparse(self.api_server) + base_url = "%s://%s/" % (url_info.scheme, url_info.netloc) + + while not done: + url = _urljoin(base_url, data['next_link']) + data = self._call_galaxy(url) + results += data['results'] + done = (data.get('next_link', None) is None) + except Exception as e: + display.warning("Unable to retrieve role (id=%s) data (%s), but this is not fatal so we continue: %s" + % (role_id, related, to_text(e))) + return results + + @g_connect(['v1']) + def get_list(self, what): + """ + Fetch the list of items specified. + """ + try: + url = _urljoin(self.api_server, self.available_api_versions['v1'], what, "?page_size") + data = self._call_galaxy(url) + if "results" in data: + results = data['results'] + else: + results = data + done = True + if "next" in data: + done = (data.get('next_link', None) is None) + while not done: + url = _urljoin(self.api_server, data['next_link']) + data = self._call_galaxy(url) + results += data['results'] + done = (data.get('next_link', None) is None) + return results + except Exception as error: + raise AnsibleError("Failed to download the %s list: %s" % (what, to_native(error))) + + @g_connect(['v1']) + def search_roles(self, search, **kwargs): + + search_url = _urljoin(self.api_server, self.available_api_versions['v1'], "search", "roles", "?") + + if search: + search_url += '&autocomplete=' + to_text(urlquote(to_bytes(search))) + + tags = kwargs.get('tags', None) + platforms = kwargs.get('platforms', None) + page_size = kwargs.get('page_size', None) + author = kwargs.get('author', None) + + if tags and isinstance(tags, string_types): + tags = tags.split(',') + search_url += '&tags_autocomplete=' + '+'.join(tags) + + if platforms and isinstance(platforms, string_types): + platforms = platforms.split(',') + search_url += '&platforms_autocomplete=' + '+'.join(platforms) + + if page_size: + search_url += '&page_size=%s' % page_size + + if author: + search_url += '&username_autocomplete=%s' % author + + data = self._call_galaxy(search_url) + return data + + @g_connect(['v1']) + def add_secret(self, source, github_user, github_repo, secret): + url = _urljoin(self.api_server, self.available_api_versions['v1'], "notification_secrets") + '/' + args = urlencode({ + "source": source, + "github_user": github_user, + "github_repo": github_repo, + "secret": secret + }) + data = self._call_galaxy(url, args=args, method="POST") + return data + + @g_connect(['v1']) + def list_secrets(self): + url = _urljoin(self.api_server, self.available_api_versions['v1'], "notification_secrets") + data = self._call_galaxy(url, auth_required=True) + return data + + @g_connect(['v1']) + 
def remove_secret(self, secret_id): + url = _urljoin(self.api_server, self.available_api_versions['v1'], "notification_secrets", secret_id) + '/' + data = self._call_galaxy(url, auth_required=True, method='DELETE') + return data + + @g_connect(['v1']) + def delete_role(self, github_user, github_repo): + url = _urljoin(self.api_server, self.available_api_versions['v1'], "removerole", + "?github_user=%s&github_repo=%s" % (github_user, github_repo)) + data = self._call_galaxy(url, auth_required=True, method='DELETE') + return data + + # Collection APIs # + + @g_connect(['v2', 'v3']) + def publish_collection(self, collection_path): + """ + Publishes a collection to a Galaxy server and returns the import task URI. + + :param collection_path: The path to the collection tarball to publish. + :return: The import task URI that contains the import results. + """ + display.display("Publishing collection artifact '%s' to %s %s" % (collection_path, self.name, self.api_server)) + + b_collection_path = to_bytes(collection_path, errors='surrogate_or_strict') + if not os.path.exists(b_collection_path): + raise AnsibleError("The collection path specified '%s' does not exist." % to_native(collection_path)) + elif not tarfile.is_tarfile(b_collection_path): + raise AnsibleError("The collection path specified '%s' is not a tarball, use 'ansible-galaxy collection " + "build' to create a proper release artifact." % to_native(collection_path)) + + with open(b_collection_path, 'rb') as collection_tar: + sha256 = secure_hash_s(collection_tar.read(), hash_func=hashlib.sha256) + + content_type, b_form_data = prepare_multipart( + { + 'sha256': sha256, + 'file': { + 'filename': b_collection_path, + 'mime_type': 'application/octet-stream', + }, + } + ) + + headers = { + 'Content-type': content_type, + 'Content-length': len(b_form_data), + } + + if 'v3' in self.available_api_versions: + n_url = _urljoin(self.api_server, self.available_api_versions['v3'], 'artifacts', 'collections') + '/' + else: + n_url = _urljoin(self.api_server, self.available_api_versions['v2'], 'collections') + '/' + + resp = self._call_galaxy(n_url, args=b_form_data, headers=headers, method='POST', auth_required=True, + error_context_msg='Error when publishing collection to %s (%s)' + % (self.name, self.api_server)) + + return resp['task'] + + @g_connect(['v2', 'v3']) + def wait_import_task(self, task_id, timeout=0): + """ + Waits until the import process on the Galaxy server has completed or the timeout is reached. + + :param task_id: The id of the import task to wait for. This can be parsed out of the return + value for GalaxyAPI.publish_collection. + :param timeout: The timeout in seconds, 0 is no timeout. 
+ """ + state = 'waiting' + data = None + + # Construct the appropriate URL per version + if 'v3' in self.available_api_versions: + full_url = _urljoin(self.api_server, self.available_api_versions['v3'], + 'imports/collections', task_id, '/') + else: + full_url = _urljoin(self.api_server, self.available_api_versions['v2'], + 'collection-imports', task_id, '/') + + display.display("Waiting until Galaxy import task %s has completed" % full_url) + start = time.time() + wait = 2 + + while timeout == 0 or (time.time() - start) < timeout: + try: + data = self._call_galaxy(full_url, method='GET', auth_required=True, + error_context_msg='Error when getting import task results at %s' % full_url) + except GalaxyError as e: + if e.http_code != 404: + raise + # The import job may not have started, and as such, the task url may not yet exist + display.vvv('Galaxy import process has not started, wait %s seconds before trying again' % wait) + time.sleep(wait) + continue + + state = data.get('state', 'waiting') + + if data.get('finished_at', None): + break + + display.vvv('Galaxy import process has a status of %s, wait %d seconds before trying again' + % (state, wait)) + time.sleep(wait) + + # poor man's exponential backoff algo so we don't flood the Galaxy API, cap at 30 seconds. + wait = min(30, wait * 1.5) + if state == 'waiting': + raise AnsibleError("Timeout while waiting for the Galaxy import process to finish, check progress at '%s'" + % to_native(full_url)) + + for message in data.get('messages', []): + level = message['level'] + if level.lower() == 'error': + display.error("Galaxy import error message: %s" % message['message']) + elif level.lower() == 'warning': + display.warning("Galaxy import warning message: %s" % message['message']) + else: + display.vvv("Galaxy import message: %s - %s" % (level, message['message'])) + + if state == 'failed': + code = to_native(data['error'].get('code', 'UNKNOWN')) + description = to_native( + data['error'].get('description', "Unknown error, see %s for more details" % full_url)) + raise AnsibleError("Galaxy import process failed: %s (Code: %s)" % (description, code)) + + @g_connect(['v2', 'v3']) + def get_collection_metadata(self, namespace, name): + """ + Gets the collection information from the Galaxy server about a specific Collection. + + :param namespace: The collection namespace. + :param name: The collection name. + return: CollectionMetadata about the collection. + """ + if 'v3' in self.available_api_versions: + api_path = self.available_api_versions['v3'] + field_map = [ + ('created_str', 'created_at'), + ('modified_str', 'updated_at'), + ] + else: + api_path = self.available_api_versions['v2'] + field_map = [ + ('created_str', 'created'), + ('modified_str', 'modified'), + ] + + info_url = _urljoin(self.api_server, api_path, 'collections', namespace, name, '/') + error_context_msg = 'Error when getting the collection info for %s.%s from %s (%s)' \ + % (namespace, name, self.name, self.api_server) + data = self._call_galaxy(info_url, error_context_msg=error_context_msg) + + metadata = {} + for name, api_field in field_map: + metadata[name] = data.get(api_field, None) + + return CollectionMetadata(namespace, name, **metadata) + + @g_connect(['v2', 'v3']) + def get_collection_version_metadata(self, namespace, name, version): + """ + Gets the collection information from the Galaxy server about a specific Collection version. + + :param namespace: The collection namespace. + :param name: The collection name. 
+ :param version: Version of the collection to get the information for. + :return: CollectionVersionMetadata about the collection at the version requested. + """ + api_path = self.available_api_versions.get('v3', self.available_api_versions.get('v2')) + url_paths = [self.api_server, api_path, 'collections', namespace, name, 'versions', version, '/'] + + n_collection_url = _urljoin(*url_paths) + error_context_msg = 'Error when getting collection version metadata for %s.%s:%s from %s (%s)' \ + % (namespace, name, version, self.name, self.api_server) + data = self._call_galaxy(n_collection_url, error_context_msg=error_context_msg, cache=True) + self._set_cache() + + signatures = data.get('signatures') or [] + + return CollectionVersionMetadata(data['namespace']['name'], data['collection']['name'], data['version'], + data['download_url'], data['artifact']['sha256'], + data['metadata']['dependencies'], data['href'], signatures) + + @g_connect(['v2', 'v3']) + def get_collection_versions(self, namespace, name): + """ + Gets a list of available versions for a collection on a Galaxy server. + + :param namespace: The collection namespace. + :param name: The collection name. + :return: A list of versions that are available. + """ + relative_link = False + if 'v3' in self.available_api_versions: + api_path = self.available_api_versions['v3'] + pagination_path = ['links', 'next'] + relative_link = True # AH pagination results are relative and not an absolute URI. + else: + api_path = self.available_api_versions['v2'] + pagination_path = ['next'] + + page_size_name = 'limit' if 'v3' in self.available_api_versions else 'page_size' + versions_url = _urljoin(self.api_server, api_path, 'collections', namespace, name, 'versions', '/?%s=%d' % (page_size_name, COLLECTION_PAGE_SIZE)) + versions_url_info = urlparse(versions_url) + cache_key = versions_url_info.path + + # We should only rely on the cache if the collection has not changed. This may slow things down but it ensures + # we are not waiting a day before finding any new collections that have been published. + if self._cache: + server_cache = self._cache.setdefault(get_cache_id(versions_url), {}) + modified_cache = server_cache.setdefault('modified', {}) + + try: + modified_date = self.get_collection_metadata(namespace, name).modified_str + except GalaxyError as err: + if err.http_code != 404: + raise + # No collection found, return an empty list to keep things consistent with the various APIs + return [] + + cached_modified_date = modified_cache.get('%s.%s' % (namespace, name), None) + if cached_modified_date != modified_date: + modified_cache['%s.%s' % (namespace, name)] = modified_date + if versions_url_info.path in server_cache: + del server_cache[cache_key] + + self._set_cache() + + error_context_msg = 'Error when getting available collection versions for %s.%s from %s (%s)' \ + % (namespace, name, self.name, self.api_server) + + try: + data = self._call_galaxy(versions_url, error_context_msg=error_context_msg, cache=True, cache_key=cache_key) + except GalaxyError as err: + if err.http_code != 404: + raise + # v3 doesn't raise a 404 so we need to mimic the empty response from APIs that do.
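# For reference, the two pagination shapes the loop below normalizes (inferred from
# this handling code): v2 responds with
#     {"next": "https://host/api/v2/.../versions/?page=2", "results": [...]}
# while v3/Automation Hub responds with a relative link and a 'data' key:
#     {"links": {"next": "/api/automation-hub/v3/.../versions/?offset=100"}, "data": [...]}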
+ return [] + + if 'data' in data: + # v3 automation-hub is the only known API that uses `data` + # since v3 pulp_ansible does not, we cannot rely on version + # to indicate which key to use + results_key = 'data' + else: + results_key = 'results' + + versions = [] + while True: + versions += [v['version'] for v in data[results_key]] + + next_link = data + for path in pagination_path: + next_link = next_link.get(path, {}) + + if not next_link: + break + elif relative_link: + # TODO: This assumes the pagination result is relative to the root server. Will need to be verified + # with someone who knows the AH API. + + # Remove the query string from the versions_url to use the next_link's query + versions_url = urljoin(versions_url, urlparse(versions_url).path) + next_link = versions_url.replace(versions_url_info.path, next_link) + + data = self._call_galaxy(to_native(next_link, errors='surrogate_or_strict'), + error_context_msg=error_context_msg, cache=True, cache_key=cache_key) + self._set_cache() + + return versions + + @g_connect(['v2', 'v3']) + def get_collection_signatures(self, namespace, name, version): + """ + Gets the collection signatures from the Galaxy server about a specific Collection version. + + :param namespace: The collection namespace. + :param name: The collection name. + :param version: Version of the collection to get the information for. + :return: A list of signature strings. + """ + api_path = self.available_api_versions.get('v3', self.available_api_versions.get('v2')) + url_paths = [self.api_server, api_path, 'collections', namespace, name, 'versions', version, '/'] + + n_collection_url = _urljoin(*url_paths) + error_context_msg = 'Error when getting collection version metadata for %s.%s:%s from %s (%s)' \ + % (namespace, name, version, self.name, self.api_server) + data = self._call_galaxy(n_collection_url, error_context_msg=error_context_msg, cache=True) + self._set_cache() + + try: + signatures = data["signatures"] + except KeyError: + # Noisy since this is used by the dep resolver, so require more verbosity than Galaxy calls + display.vvvvvv(f"Server {self.api_server} has not signed {namespace}.{name}:{version}") + return [] + else: + return [signature_info["signature"] for signature_info in signatures] diff --git a/lib/ansible/galaxy/collection/__init__.py b/lib/ansible/galaxy/collection/__init__.py new file mode 100644 index 0000000..7a144c0 --- /dev/null +++ b/lib/ansible/galaxy/collection/__init__.py @@ -0,0 +1,1836 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2019-2021, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +"""Installed collections management package.""" + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import errno +import fnmatch +import functools +import json +import os +import queue +import re +import shutil +import stat +import sys +import tarfile +import tempfile +import textwrap +import threading +import time +import typing as t + +from collections import namedtuple +from contextlib import contextmanager +from dataclasses import dataclass, fields as dc_fields +from hashlib import sha256 +from io import BytesIO +from importlib.metadata import distribution +from itertools import chain + +try: + from packaging.requirements import Requirement as PkgReq +except ImportError: + class PkgReq: # type: ignore[no-redef] + pass + + HAS_PACKAGING = False +else: + HAS_PACKAGING = True + +try: + from distlib.manifest import Manifest # type: 
ignore[import] + from distlib import DistlibException # type: ignore[import] +except ImportError: + HAS_DISTLIB = False +else: + HAS_DISTLIB = True + +if t.TYPE_CHECKING: + from ansible.galaxy.collection.concrete_artifact_manager import ( + ConcreteArtifactsManager, + ) + + ManifestKeysType = t.Literal[ + 'collection_info', 'file_manifest_file', 'format', + ] + FileMetaKeysType = t.Literal[ + 'name', + 'ftype', + 'chksum_type', + 'chksum_sha256', + 'format', + ] + CollectionInfoKeysType = t.Literal[ + # collection meta: + 'namespace', 'name', 'version', + 'authors', 'readme', + 'tags', 'description', + 'license', 'license_file', + 'dependencies', + 'repository', 'documentation', + 'homepage', 'issues', + + # files meta: + FileMetaKeysType, + ] + ManifestValueType = t.Dict[CollectionInfoKeysType, t.Union[int, str, t.List[str], t.Dict[str, str], None]] + CollectionManifestType = t.Dict[ManifestKeysType, ManifestValueType] + FileManifestEntryType = t.Dict[FileMetaKeysType, t.Union[str, int, None]] + FilesManifestType = t.Dict[t.Literal['files', 'format'], t.Union[t.List[FileManifestEntryType], int]] + +import ansible.constants as C +from ansible.errors import AnsibleError +from ansible.galaxy.api import GalaxyAPI +from ansible.galaxy.collection.concrete_artifact_manager import ( + _consume_file, + _download_file, + _get_json_from_installed_dir, + _get_meta_from_src_dir, + _tarfile_extract, +) +from ansible.galaxy.collection.galaxy_api_proxy import MultiGalaxyAPIProxy +from ansible.galaxy.collection.gpg import ( + run_gpg_verify, + parse_gpg_errors, + get_signature_from_source, + GPG_ERROR_MAP, +) +try: + from ansible.galaxy.dependency_resolution import ( + build_collection_dependency_resolver, + ) + from ansible.galaxy.dependency_resolution.errors import ( + CollectionDependencyResolutionImpossible, + CollectionDependencyInconsistentCandidate, + ) + from ansible.galaxy.dependency_resolution.providers import ( + RESOLVELIB_VERSION, + RESOLVELIB_LOWERBOUND, + RESOLVELIB_UPPERBOUND, + ) +except ImportError: + HAS_RESOLVELIB = False +else: + HAS_RESOLVELIB = True + +from ansible.galaxy.dependency_resolution.dataclasses import ( + Candidate, Requirement, _is_installed_collection_dir, +) +from ansible.galaxy.dependency_resolution.versioning import meets_requirements +from ansible.plugins.loader import get_all_plugin_loaders +from ansible.module_utils.six import raise_from +from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.module_utils.common.collections import is_sequence +from ansible.module_utils.common.yaml import yaml_dump +from ansible.utils.collection_loader import AnsibleCollectionRef +from ansible.utils.display import Display +from ansible.utils.hashing import secure_hash, secure_hash_s +from ansible.utils.sentinel import Sentinel + + +display = Display() + +MANIFEST_FORMAT = 1 +MANIFEST_FILENAME = 'MANIFEST.json' + +ModifiedContent = namedtuple('ModifiedContent', ['filename', 'expected', 'installed']) + +SIGNATURE_COUNT_RE = r"^(?P<strict>\+)?(?:(?P<count>\d+)|(?P<all>all))$" + + +@dataclass +class ManifestControl: + directives: list[str] = None + omit_default_directives: bool = False + + def __post_init__(self): + # Allow a dict representing this dataclass to be splatted directly. 
+ # Requires attrs to have a default value, so anything with a default + # of None is swapped for its, potentially mutable, default + for field in dc_fields(self): + if getattr(self, field.name) is None: + super().__setattr__(field.name, field.type()) + + +class CollectionSignatureError(Exception): + def __init__(self, reasons=None, stdout=None, rc=None, ignore=False): + self.reasons = reasons + self.stdout = stdout + self.rc = rc + self.ignore = ignore + + self._reason_wrapper = None + + def _report_unexpected(self, collection_name): + return ( + f"Unexpected error for '{collection_name}': " + f"GnuPG signature verification failed with the return code {self.rc} and output {self.stdout}" + ) + + def _report_expected(self, collection_name): + header = f"Signature verification failed for '{collection_name}' (return code {self.rc}):" + return header + self._format_reasons() + + def _format_reasons(self): + if self._reason_wrapper is None: + self._reason_wrapper = textwrap.TextWrapper( + initial_indent=" * ", # 6 chars + subsequent_indent=" ", # 6 chars + ) + + wrapped_reasons = [ + '\n'.join(self._reason_wrapper.wrap(reason)) + for reason in self.reasons + ] + + return '\n' + '\n'.join(wrapped_reasons) + + def report(self, collection_name): + if self.reasons: + return self._report_expected(collection_name) + + return self._report_unexpected(collection_name) + + +# FUTURE: expose actual verify result details for a collection on this object, maybe reimplement as dataclass on py3.8+ +class CollectionVerifyResult: + def __init__(self, collection_name): # type: (str) -> None + self.collection_name = collection_name # type: str + self.success = True # type: bool + + +def verify_local_collection(local_collection, remote_collection, artifacts_manager): + # type: (Candidate, t.Optional[Candidate], ConcreteArtifactsManager) -> CollectionVerifyResult + """Verify integrity of the locally installed collection. + + :param local_collection: Collection being checked. + :param remote_collection: Upstream collection (optional, if None, only verify local artifact) + :param artifacts_manager: Artifacts manager. + :return: a collection verify result object. + """ + result = CollectionVerifyResult(local_collection.fqcn) + + b_collection_path = to_bytes(local_collection.src, errors='surrogate_or_strict') + + display.display("Verifying '{coll!s}'.".format(coll=local_collection)) + display.display( + u"Installed collection found at '{path!s}'". 
+ format(path=to_text(local_collection.src)), + ) + + modified_content = [] # type: list[ModifiedContent] + + verify_local_only = remote_collection is None + + # partial away the local FS detail so we can just ask generically during validation + get_json_from_validation_source = functools.partial(_get_json_from_installed_dir, b_collection_path) + get_hash_from_validation_source = functools.partial(_get_file_hash, b_collection_path) + + if not verify_local_only: + # Compare installed version versus requirement version + if local_collection.ver != remote_collection.ver: + err = ( + "{local_fqcn!s} has the version '{local_ver!s}' but " + "is being compared to '{remote_ver!s}'".format( + local_fqcn=local_collection.fqcn, + local_ver=local_collection.ver, + remote_ver=remote_collection.ver, + ) + ) + display.display(err) + result.success = False + return result + + manifest_file = os.path.join(to_text(b_collection_path, errors='surrogate_or_strict'), MANIFEST_FILENAME) + signatures = list(local_collection.signatures) + if verify_local_only and local_collection.source_info is not None: + signatures = [info["signature"] for info in local_collection.source_info["signatures"]] + signatures + elif not verify_local_only and remote_collection.signatures: + signatures = list(remote_collection.signatures) + signatures + + keyring_configured = artifacts_manager.keyring is not None + if not keyring_configured and signatures: + display.warning( + "The GnuPG keyring used for collection signature " + "verification was not configured but signatures were " + "provided by the Galaxy server. " + "Configure a keyring for ansible-galaxy to verify " + "the origin of the collection. " + "Skipping signature verification." + ) + elif keyring_configured: + if not verify_file_signatures( + local_collection.fqcn, + manifest_file, + signatures, + artifacts_manager.keyring, + artifacts_manager.required_successful_signature_count, + artifacts_manager.ignore_signature_errors, + ): + result.success = False + return result + display.vvvv(f"GnuPG signature verification succeeded, verifying contents of {local_collection}") + + if verify_local_only: + # since we're not downloading this, just seed it with the value from disk + manifest_hash = get_hash_from_validation_source(MANIFEST_FILENAME) + elif keyring_configured and remote_collection.signatures: + manifest_hash = get_hash_from_validation_source(MANIFEST_FILENAME) + else: + # fetch remote + b_temp_tar_path = ( # NOTE: AnsibleError is raised on URLError + artifacts_manager.get_artifact_path + if remote_collection.is_concrete_artifact + else artifacts_manager.get_galaxy_artifact_path + )(remote_collection) + + display.vvv( + u"Remote collection cached as '{path!s}'".format(path=to_text(b_temp_tar_path)) + ) + + # partial away the tarball details so we can just ask generically during validation + get_json_from_validation_source = functools.partial(_get_json_from_tar_file, b_temp_tar_path) + get_hash_from_validation_source = functools.partial(_get_tar_file_hash, b_temp_tar_path) + + # Verify the downloaded manifest hash matches the installed copy before verifying the file manifest + manifest_hash = get_hash_from_validation_source(MANIFEST_FILENAME) + _verify_file_hash(b_collection_path, MANIFEST_FILENAME, manifest_hash, modified_content) + + display.display('MANIFEST.json hash: {manifest_hash}'.format(manifest_hash=manifest_hash)) + + manifest = get_json_from_validation_source(MANIFEST_FILENAME) + + # Use the manifest to verify the file manifest checksum + file_manifest_data = 
manifest['file_manifest_file'] + file_manifest_filename = file_manifest_data['name'] + expected_hash = file_manifest_data['chksum_%s' % file_manifest_data['chksum_type']] + + # Verify the file manifest before using it to verify individual files + _verify_file_hash(b_collection_path, file_manifest_filename, expected_hash, modified_content) + file_manifest = get_json_from_validation_source(file_manifest_filename) + + collection_dirs = set() + collection_files = { + os.path.join(b_collection_path, b'MANIFEST.json'), + os.path.join(b_collection_path, b'FILES.json'), + } + + # Use the file manifest to verify individual file checksums + for manifest_data in file_manifest['files']: + name = manifest_data['name'] + + if manifest_data['ftype'] == 'file': + collection_files.add( + os.path.join(b_collection_path, to_bytes(name, errors='surrogate_or_strict')) + ) + expected_hash = manifest_data['chksum_%s' % manifest_data['chksum_type']] + _verify_file_hash(b_collection_path, name, expected_hash, modified_content) + + if manifest_data['ftype'] == 'dir': + collection_dirs.add( + os.path.join(b_collection_path, to_bytes(name, errors='surrogate_or_strict')) + ) + + # Find any paths not in the FILES.json + for root, dirs, files in os.walk(b_collection_path): + for name in files: + full_path = os.path.join(root, name) + path = to_text(full_path[len(b_collection_path) + 1::], errors='surrogate_or_strict') + + if full_path not in collection_files: + modified_content.append( + ModifiedContent(filename=path, expected='the file does not exist', installed='the file exists') + ) + for name in dirs: + full_path = os.path.join(root, name) + path = to_text(full_path[len(b_collection_path) + 1::], errors='surrogate_or_strict') + + if full_path not in collection_dirs: + modified_content.append( + ModifiedContent(filename=path, expected='the directory does not exist', installed='the directory exists') + ) + + if modified_content: + result.success = False + display.display( + 'Collection {fqcn!s} contains modified content ' + 'in the following files:'. + format(fqcn=to_text(local_collection.fqcn)), + ) + for content_change in modified_content: + display.display(' %s' % content_change.filename) + display.v(" Expected: %s\n Found: %s" % (content_change.expected, content_change.installed)) + else: + what = "are internally consistent with its manifest" if verify_local_only else "match the remote collection" + display.display( + "Successfully verified that checksums for '{coll!s}' {what!s}.". 
+ format(coll=local_collection, what=what), + ) + + return result + + +def verify_file_signatures(fqcn, manifest_file, detached_signatures, keyring, required_successful_count, ignore_signature_errors): + # type: (str, str, list[str], str, str, list[str]) -> bool + successful = 0 + error_messages = [] + + signature_count_requirements = re.match(SIGNATURE_COUNT_RE, required_successful_count).groupdict() + + strict = signature_count_requirements['strict'] or False + require_all = signature_count_requirements['all'] + require_count = signature_count_requirements['count'] + if require_count is not None: + require_count = int(require_count) + + for signature in detached_signatures: + signature = to_text(signature, errors='surrogate_or_strict') + try: + verify_file_signature(manifest_file, signature, keyring, ignore_signature_errors) + except CollectionSignatureError as error: + if error.ignore: + # Do not include ignored errors in either the failed or successful count + continue + error_messages.append(error.report(fqcn)) + else: + successful += 1 + + if require_all: + continue + + if successful == require_count: + break + + if strict and not successful: + verified = False + display.display(f"Signature verification failed for '{fqcn}': no successful signatures") + elif require_all: + verified = not error_messages + if not verified: + display.display(f"Signature verification failed for '{fqcn}': some signatures failed") + else: + verified = not detached_signatures or require_count == successful + if not verified: + display.display(f"Signature verification failed for '{fqcn}': fewer successful signatures than required") + + if not verified: + for msg in error_messages: + display.vvvv(msg) + + return verified + + +def verify_file_signature(manifest_file, detached_signature, keyring, ignore_signature_errors): + # type: (str, str, str, list[str]) -> None + """Run the gpg command and parse any errors. Raises CollectionSignatureError on failure.""" + gpg_result, gpg_verification_rc = run_gpg_verify(manifest_file, detached_signature, keyring, display) + + if gpg_result: + errors = parse_gpg_errors(gpg_result) + try: + error = next(errors) + except StopIteration: + pass + else: + reasons = [] + ignored_reasons = 0 + + for error in chain([error], errors): + # Get error status (dict key) from the class (dict value) + status_code = list(GPG_ERROR_MAP.keys())[list(GPG_ERROR_MAP.values()).index(error.__class__)] + if status_code in ignore_signature_errors: + ignored_reasons += 1 + reasons.append(error.get_gpg_error_description()) + + ignore = len(reasons) == ignored_reasons + raise CollectionSignatureError(reasons=set(reasons), stdout=gpg_result, rc=gpg_verification_rc, ignore=ignore) + + if gpg_verification_rc: + raise CollectionSignatureError(stdout=gpg_result, rc=gpg_verification_rc) + + # No errors and rc is 0, verify was successful + return None + + +def build_collection(u_collection_path, u_output_path, force): + # type: (str, str, bool) -> str + """Creates the Ansible collection artifact in a .tar.gz file. + + :param u_collection_path: The path to the collection to build. This should be the directory that contains the + galaxy.yml file. + :param u_output_path: The path to create the collection build artifact. This should be a directory. + :param force: Whether to overwrite an existing collection build artifact or fail. + :return: The path to the collection build artifact. 
+ """ + b_collection_path = to_bytes(u_collection_path, errors='surrogate_or_strict') + try: + collection_meta = _get_meta_from_src_dir(b_collection_path) + except LookupError as lookup_err: + raise_from(AnsibleError(to_native(lookup_err)), lookup_err) + + collection_manifest = _build_manifest(**collection_meta) + file_manifest = _build_files_manifest( + b_collection_path, + collection_meta['namespace'], # type: ignore[arg-type] + collection_meta['name'], # type: ignore[arg-type] + collection_meta['build_ignore'], # type: ignore[arg-type] + collection_meta['manifest'], # type: ignore[arg-type] + ) + + artifact_tarball_file_name = '{ns!s}-{name!s}-{ver!s}.tar.gz'.format( + name=collection_meta['name'], + ns=collection_meta['namespace'], + ver=collection_meta['version'], + ) + b_collection_output = os.path.join( + to_bytes(u_output_path), + to_bytes(artifact_tarball_file_name, errors='surrogate_or_strict'), + ) + + if os.path.exists(b_collection_output): + if os.path.isdir(b_collection_output): + raise AnsibleError("The output collection artifact '%s' already exists, " + "but is a directory - aborting" % to_native(b_collection_output)) + elif not force: + raise AnsibleError("The file '%s' already exists. You can use --force to re-create " + "the collection artifact." % to_native(b_collection_output)) + + collection_output = _build_collection_tar(b_collection_path, b_collection_output, collection_manifest, file_manifest) + return collection_output + + +def download_collections( + collections, # type: t.Iterable[Requirement] + output_path, # type: str + apis, # type: t.Iterable[GalaxyAPI] + no_deps, # type: bool + allow_pre_release, # type: bool + artifacts_manager, # type: ConcreteArtifactsManager +): # type: (...) -> None + """Download Ansible collections as their tarball from a Galaxy server to the path specified and creates a requirements + file of the downloaded requirements to be used for an install. + + :param collections: The collections to download, should be a list of tuples with (name, requirement, Galaxy Server). + :param output_path: The path to download the collections to. + :param apis: A list of GalaxyAPIs to query when search for a collection. + :param validate_certs: Whether to validate the certificate if downloading a tarball from a non-Galaxy host. + :param no_deps: Ignore any collection dependencies and only download the base requirements. + :param allow_pre_release: Do not ignore pre-release versions when selecting the latest. + """ + with _display_progress("Process download dependency map"): + dep_map = _resolve_depenency_map( + set(collections), + galaxy_apis=apis, + preferred_candidates=None, + concrete_artifacts_manager=artifacts_manager, + no_deps=no_deps, + allow_pre_release=allow_pre_release, + upgrade=False, + # Avoid overhead getting signatures since they are not currently applicable to downloaded collections + include_signatures=False, + offline=False, + ) + + b_output_path = to_bytes(output_path, errors='surrogate_or_strict') + + requirements = [] + with _display_progress( + "Starting collection download process to '{path!s}'". + format(path=output_path), + ): + for fqcn, concrete_coll_pin in dep_map.copy().items(): # FIXME: move into the provider + if concrete_coll_pin.is_virtual: + display.display( + 'Virtual collection {coll!s} is not downloadable'. + format(coll=to_text(concrete_coll_pin)), + ) + continue + + display.display( + u"Downloading collection '{coll!s}' to '{path!s}'". 
+ format(coll=to_text(concrete_coll_pin), path=to_text(b_output_path)), + ) + + b_src_path = ( + artifacts_manager.get_artifact_path + if concrete_coll_pin.is_concrete_artifact + else artifacts_manager.get_galaxy_artifact_path + )(concrete_coll_pin) + + b_dest_path = os.path.join( + b_output_path, + os.path.basename(b_src_path), + ) + + if concrete_coll_pin.is_dir: + b_dest_path = to_bytes( + build_collection( + to_text(b_src_path, errors='surrogate_or_strict'), + to_text(output_path, errors='surrogate_or_strict'), + force=True, + ), + errors='surrogate_or_strict', + ) + else: + shutil.copy(to_native(b_src_path), to_native(b_dest_path)) + + display.display( + "Collection '{coll!s}' was downloaded successfully". + format(coll=concrete_coll_pin), + ) + requirements.append({ + # FIXME: Consider using a more specific upgraded format + # FIXME: having FQCN in the name field, with src field + # FIXME: pointing to the file path, and explicitly set + # FIXME: type. If version and name are set, it'd + # FIXME: perform validation against the actual metadata + # FIXME: in the artifact src points at. + 'name': to_native(os.path.basename(b_dest_path)), + 'version': concrete_coll_pin.ver, + }) + + requirements_path = os.path.join(output_path, 'requirements.yml') + b_requirements_path = to_bytes( + requirements_path, errors='surrogate_or_strict', + ) + display.display( + u'Writing requirements.yml file of downloaded collections ' + "to '{path!s}'".format(path=to_text(requirements_path)), + ) + yaml_bytes = to_bytes( + yaml_dump({'collections': requirements}), + errors='surrogate_or_strict', + ) + with open(b_requirements_path, mode='wb') as req_fd: + req_fd.write(yaml_bytes) + + +def publish_collection(collection_path, api, wait, timeout): + """Publish an Ansible collection tarball into an Ansible Galaxy server. + + :param collection_path: The path to the collection tarball to publish. + :param api: A GalaxyAPI to publish the collection to. + :param wait: Whether to wait until the import process is complete. + :param timeout: The time in seconds to wait for the import process to finish, 0 is indefinite. + """ + import_uri = api.publish_collection(collection_path) + + if wait: + # Galaxy returns a url fragment which differs between v2 and v3. The second to last entry is + # always the task_id, though. + # v2: {"task": "https://galaxy-dev.ansible.com/api/v2/collection-imports/35573/"} + # v3: {"task": "/api/automation-hub/v3/imports/collections/838d1308-a8f4-402c-95cb-7823f3806cd8/"} + task_id = None + for path_segment in reversed(import_uri.split('/')): + if path_segment: + task_id = path_segment + break + + if not task_id: + raise AnsibleError("Publishing the collection did not return valid task info. Cannot wait for task status. Returned task info: '%s'" % import_uri) + + with _display_progress( + "Collection has been published to the Galaxy server " + "{api.name!s} {api.api_server!s}".format(api=api), + ): + api.wait_import_task(task_id, timeout) + display.display("Collection has been successfully published and imported to the Galaxy server %s %s" + % (api.name, api.api_server)) + else: + display.display("Collection has been pushed to the Galaxy server %s %s, not waiting until import has " + "completed due to --no-wait being set. 
Import task results can be found at %s"
+ % (api.name, api.api_server, import_uri))
+
+
+def install_collections(
+ collections, # type: t.Iterable[Requirement]
+ output_path, # type: str
+ apis, # type: t.Iterable[GalaxyAPI]
+ ignore_errors, # type: bool
+ no_deps, # type: bool
+ force, # type: bool
+ force_deps, # type: bool
+ upgrade, # type: bool
+ allow_pre_release, # type: bool
+ artifacts_manager, # type: ConcreteArtifactsManager
+ disable_gpg_verify, # type: bool
+ offline, # type: bool
+): # type: (...) -> None
+ """Install Ansible collections to the path specified.
+
+ :param collections: The collections to install.
+ :param output_path: The path to install the collections to.
+ :param apis: A list of GalaxyAPIs to query when searching for a collection.
+ :param ignore_errors: Whether to ignore any errors when installing the collection.
+ :param no_deps: Ignore any collection dependencies and only install the base requirements.
+ :param force: Re-install a collection if it has already been installed.
+ :param force_deps: Re-install a collection as well as its dependencies if they have already been installed.
+ :param upgrade: Upgrade already installed collections to the latest version allowed by the requirements.
+ :param allow_pre_release: Do not ignore pre-release versions when selecting the latest.
+ :param artifacts_manager: The manager used to look up and cache concrete collection artifacts.
+ :param disable_gpg_verify: Skip GPG signature verification of the collections being installed.
+ :param offline: Resolve the requirements without contacting any distribution servers.
+ """
+ existing_collections = {
+ Requirement(coll.fqcn, coll.ver, coll.src, coll.type, None)
+ for coll in find_existing_collections(output_path, artifacts_manager)
+ }
+
+ unsatisfied_requirements = set(
+ chain.from_iterable(
+ (
+ Requirement.from_dir_path(sub_coll, artifacts_manager)
+ for sub_coll in (
+ artifacts_manager.
+ get_direct_collection_dependencies(install_req).
+ keys()
+ )
+ )
+ if install_req.is_subdirs else (install_req, )
+ for install_req in collections
+ ),
+ )
+ requested_requirements_names = {req.fqcn for req in unsatisfied_requirements}
+
+ # NOTE: Don't attempt to reevaluate already installed deps
+ # NOTE: unless `--force` or `--force-with-deps` is passed
+ unsatisfied_requirements -= set() if force or force_deps else {
+ req
+ for req in unsatisfied_requirements
+ for exs in existing_collections
+ if req.fqcn == exs.fqcn and meets_requirements(exs.ver, req.ver)
+ }
+
+ if not unsatisfied_requirements and not upgrade:
+ display.display(
+ 'Nothing to do. All requested collections are already '
+ 'installed. If you want to reinstall them, '
+ 'consider using `--force`.'
+ )
+ return
+
+ # FIXME: This probably needs to be improved to
+ # FIXME: properly match differing src/type.
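+ # NOTE: The preferred_requirements expression below mirrors the force
+ # NOTE: flags: with `--force-with-deps` nothing already installed is
+ # NOTE: preferred, with `--force` only collections that were not
+ # NOTE: explicitly requested keep their installed versions, and
+ # NOTE: otherwise every installed collection is preferred as-is.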
+ existing_non_requested_collections = {
+ coll for coll in existing_collections
+ if coll.fqcn not in requested_requirements_names
+ }
+
+ preferred_requirements = (
+ [] if force_deps
+ else existing_non_requested_collections if force
+ else existing_collections
+ )
+ preferred_collections = {
+ # NOTE: No need to include signatures if the collection is already installed
+ Candidate(coll.fqcn, coll.ver, coll.src, coll.type, None)
+ for coll in preferred_requirements
+ }
+ with _display_progress("Process install dependency map"):
+ dependency_map = _resolve_depenency_map(
+ collections,
+ galaxy_apis=apis,
+ preferred_candidates=preferred_collections,
+ concrete_artifacts_manager=artifacts_manager,
+ no_deps=no_deps,
+ allow_pre_release=allow_pre_release,
+ upgrade=upgrade,
+ include_signatures=not disable_gpg_verify,
+ offline=offline,
+ )
+
+ keyring_exists = artifacts_manager.keyring is not None
+ with _display_progress("Starting collection install process"):
+ for fqcn, concrete_coll_pin in dependency_map.items():
+ if concrete_coll_pin.is_virtual:
+ display.vvvv(
+ "'{coll!s}' is virtual, skipping.".
+ format(coll=to_text(concrete_coll_pin)),
+ )
+ continue
+
+ if concrete_coll_pin in preferred_collections:
+ display.display(
+ "'{coll!s}' is already installed, skipping.".
+ format(coll=to_text(concrete_coll_pin)),
+ )
+ continue
+
+ if not disable_gpg_verify and concrete_coll_pin.signatures and not keyring_exists:
+ # Duplicate warning msgs are not displayed
+ display.warning(
+ "The GnuPG keyring used for collection signature "
+ "verification was not configured but signatures were "
+ "provided by the Galaxy server to verify authenticity. "
+ "Configure a keyring for ansible-galaxy to use "
+ "or disable signature verification. "
+ "Skipping signature verification."
+ )
+
+ try:
+ install(concrete_coll_pin, output_path, artifacts_manager)
+ except AnsibleError as err:
+ if ignore_errors:
+ display.warning(
+ 'Failed to install collection {coll!s} but skipping '
+ 'due to --ignore-errors being set. Error: {error!s}'.
+ format(
+ coll=to_text(concrete_coll_pin),
+ error=to_text(err),
+ )
+ )
+ else:
+ raise
+
+
+# NOTE: imported in ansible.cli.galaxy
+def validate_collection_name(name): # type: (str) -> str
+ """Validate that a collection name, as input by the user or read from a requirements file, fits the naming requirements.
+
+ :param name: The input name with optional range specifier split by ':'.
+ :return: The input value, required for argparse validation.
+ """
+ collection, dummy, dummy = name.partition(':')
+ if AnsibleCollectionRef.is_valid_collection_name(collection):
+ return name
+
+ raise AnsibleError("Invalid collection name '%s', "
+ "name must be in the format <namespace>.<collection>. \n"
+ "Please make sure namespace and collection name contain "
+ "characters from [a-zA-Z0-9_] only." % name)
+
+
+# NOTE: imported in ansible.cli.galaxy
+def validate_collection_path(collection_path): # type: (str) -> str
+ """Ensure a given path ends with 'ansible_collections'.
+
+ :param collection_path: The path that should end in 'ansible_collections'
+ :return: The collection_path, with 'ansible_collections' appended if it did not already end with it.
+ """ + + if os.path.split(collection_path)[1] != 'ansible_collections': + return os.path.join(collection_path, 'ansible_collections') + + return collection_path + + +def verify_collections( + collections, # type: t.Iterable[Requirement] + search_paths, # type: t.Iterable[str] + apis, # type: t.Iterable[GalaxyAPI] + ignore_errors, # type: bool + local_verify_only, # type: bool + artifacts_manager, # type: ConcreteArtifactsManager +): # type: (...) -> list[CollectionVerifyResult] + r"""Verify the integrity of locally installed collections. + + :param collections: The collections to check. + :param search_paths: Locations for the local collection lookup. + :param apis: A list of GalaxyAPIs to query when searching for a collection. + :param ignore_errors: Whether to ignore any errors when verifying the collection. + :param local_verify_only: When True, skip downloads and only verify local manifests. + :param artifacts_manager: Artifacts manager. + :return: list of CollectionVerifyResult objects describing the results of each collection verification + """ + results = [] # type: list[CollectionVerifyResult] + + api_proxy = MultiGalaxyAPIProxy(apis, artifacts_manager) + + with _display_progress(): + for collection in collections: + try: + if collection.is_concrete_artifact: + raise AnsibleError( + message="'{coll_type!s}' type is not supported. " + 'The format namespace.name is expected.'. + format(coll_type=collection.type) + ) + + # NOTE: Verify local collection exists before + # NOTE: downloading its source artifact from + # NOTE: a galaxy server. + default_err = 'Collection %s is not installed in any of the collection paths.' % collection.fqcn + for search_path in search_paths: + b_search_path = to_bytes( + os.path.join( + search_path, + collection.namespace, collection.name, + ), + errors='surrogate_or_strict', + ) + if not os.path.isdir(b_search_path): + continue + if not _is_installed_collection_dir(b_search_path): + default_err = ( + "Collection %s does not have a MANIFEST.json. " + "A MANIFEST.json is expected if the collection has been built " + "and installed via ansible-galaxy" % collection.fqcn + ) + continue + + local_collection = Candidate.from_dir_path( + b_search_path, artifacts_manager, + ) + supplemental_signatures = [ + get_signature_from_source(source, display) + for source in collection.signature_sources or [] + ] + local_collection = Candidate( + local_collection.fqcn, + local_collection.ver, + local_collection.src, + local_collection.type, + signatures=frozenset(supplemental_signatures), + ) + + break + else: + raise AnsibleError(message=default_err) + + if local_verify_only: + remote_collection = None + else: + signatures = api_proxy.get_signatures(local_collection) + signatures.extend([ + get_signature_from_source(source, display) + for source in collection.signature_sources or [] + ]) + + remote_collection = Candidate( + collection.fqcn, + collection.ver if collection.ver != '*' + else local_collection.ver, + None, 'galaxy', + frozenset(signatures), + ) + + # Download collection on a galaxy server for comparison + try: + # NOTE: If there are no signatures, trigger the lookup. If found, + # NOTE: it'll cache download URL and token in artifact manager. + # NOTE: If there are no Galaxy server signatures, only user-provided signature URLs, + # NOTE: those alone validate the MANIFEST.json and the remote collection is not downloaded. + # NOTE: The remote MANIFEST.json is only used in verification if there are no signatures. 
+ if not signatures and not collection.signature_sources:
+ api_proxy.get_collection_version_metadata(
+ remote_collection,
+ )
+ except AnsibleError as e: # FIXME: does this actually emit any errors?
+ # FIXME: extract the actual message and adjust this:
+ expected_error_msg = (
+ 'Failed to find collection {coll.fqcn!s}:{coll.ver!s}'.
+ format(coll=collection)
+ )
+ if e.message == expected_error_msg:
+ raise AnsibleError(
+ 'Failed to find remote collection '
+ "'{coll!s}' on any of the galaxy servers".
+ format(coll=collection)
+ )
+ raise
+
+ result = verify_local_collection(local_collection, remote_collection, artifacts_manager)
+
+ results.append(result)
+
+ except AnsibleError as err:
+ if ignore_errors:
+ display.warning(
+ "Failed to verify collection '{coll!s}' but skipping "
+ 'due to --ignore-errors being set. '
+ 'Error: {err!s}'.
+ format(coll=collection, err=to_text(err)),
+ )
+ else:
+ raise
+
+ return results
+
+
+@contextmanager
+def _tempdir():
+ b_temp_path = tempfile.mkdtemp(dir=to_bytes(C.DEFAULT_LOCAL_TMP, errors='surrogate_or_strict'))
+ try:
+ yield b_temp_path
+ finally:
+ shutil.rmtree(b_temp_path)
+
+
+@contextmanager
+def _display_progress(msg=None):
+ config_display = C.GALAXY_DISPLAY_PROGRESS
+ display_wheel = sys.stdout.isatty() if config_display is None else config_display
+
+ global display
+ if msg is not None:
+ display.display(msg)
+
+ if not display_wheel:
+ yield
+ return
+
+ def progress(display_queue, actual_display):
+ actual_display.debug("Starting display_progress display thread")
+ t = threading.current_thread()
+
+ while True:
+ for c in "|/-\\":
+ actual_display.display(c + "\b", newline=False)
+ time.sleep(0.1)
+
+ # Display any messages queued by the main thread
+ # (Queue.get ignores timeout when block=False, so no timeout is passed)
+ while True:
+ try:
+ method, args, kwargs = display_queue.get(block=False)
+ except queue.Empty:
+ break
+ else:
+ func = getattr(actual_display, method)
+ func(*args, **kwargs)
+
+ if getattr(t, "finish", False):
+ actual_display.debug("Received end signal for display_progress display thread")
+ return
+
+ class DisplayThread(object):
+
+ def __init__(self, display_queue):
+ self.display_queue = display_queue
+
+ def __getattr__(self, attr):
+ def call_display(*args, **kwargs):
+ self.display_queue.put((attr, args, kwargs))
+
+ return call_display
+
+ # Temporarily override the global display object with our own which adds the calls to a queue for the thread to call.
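+ # NOTE: `DisplayThread.__getattr__` turns any attribute access such as
+ # NOTE: `display.warning(...)` into a queued (method, args, kwargs)
+ # NOTE: tuple, which the spinner thread replays on the real display
+ # NOTE: between animation frames.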
+ old_display = display
+ try:
+ display_queue = queue.Queue()
+ display = DisplayThread(display_queue)
+ t = threading.Thread(target=progress, args=(display_queue, old_display))
+ t.daemon = True
+ t.start()
+
+ try:
+ yield
+ finally:
+ t.finish = True
+ t.join()
+ except Exception:
+ # The exception is re-raised so we can be sure the thread is finished and not using the display anymore
+ raise
+ finally:
+ display = old_display
+
+
+def _verify_file_hash(b_path, filename, expected_hash, error_queue):
+ b_file_path = to_bytes(os.path.join(to_text(b_path), filename), errors='surrogate_or_strict')
+
+ if not os.path.isfile(b_file_path):
+ actual_hash = None
+ else:
+ with open(b_file_path, mode='rb') as file_object:
+ actual_hash = _consume_file(file_object)
+
+ if expected_hash != actual_hash:
+ error_queue.append(ModifiedContent(filename=filename, expected=expected_hash, installed=actual_hash))
+
+
+def _make_manifest():
+ return {
+ 'files': [
+ {
+ 'name': '.',
+ 'ftype': 'dir',
+ 'chksum_type': None,
+ 'chksum_sha256': None,
+ 'format': MANIFEST_FORMAT,
+ },
+ ],
+ 'format': MANIFEST_FORMAT,
+ }
+
+
+def _make_entry(name, ftype, chksum_type='sha256', chksum=None):
+ return {
+ 'name': name,
+ 'ftype': ftype,
+ 'chksum_type': chksum_type if chksum else None,
+ f'chksum_{chksum_type}': chksum,
+ 'format': MANIFEST_FORMAT
+ }
+
+
+def _build_files_manifest(b_collection_path, namespace, name, ignore_patterns, manifest_control):
+ # type: (bytes, str, str, list[str], dict[str, t.Any]) -> FilesManifestType
+ if ignore_patterns and manifest_control is not Sentinel:
+ raise AnsibleError('"build_ignore" and "manifest" are mutually exclusive')
+
+ if manifest_control is not Sentinel:
+ return _build_files_manifest_distlib(
+ b_collection_path,
+ namespace,
+ name,
+ manifest_control,
+ )
+
+ return _build_files_manifest_walk(b_collection_path, namespace, name, ignore_patterns)
+
+
+def _build_files_manifest_distlib(b_collection_path, namespace, name, manifest_control):
+ # type: (bytes, str, str, dict[str, t.Any]) -> FilesManifestType
+
+ if not HAS_DISTLIB:
+ raise AnsibleError('Use of "manifest" requires the python "distlib" library')
+
+ if manifest_control is None:
+ manifest_control = {}
+
+ try:
+ control = ManifestControl(**manifest_control)
+ except TypeError as ex:
+ raise AnsibleError(f'Invalid "manifest" provided: {ex}')
+
+ if not is_sequence(control.directives):
+ raise AnsibleError(f'"manifest.directives" must be a list, got: {control.directives.__class__.__name__}')
+
+ if not isinstance(control.omit_default_directives, bool):
+ raise AnsibleError(
+ '"manifest.omit_default_directives" is expected to be a boolean, got: '
+ f'{control.omit_default_directives.__class__.__name__}'
+ )
+
+ if control.omit_default_directives and not control.directives:
+ raise AnsibleError(
+ '"manifest.omit_default_directives" was set to True, but no directives were defined '
+ 'in "manifest.directives". This would produce an empty collection artifact.'
+ ) + + directives = [] + if control.omit_default_directives: + directives.extend(control.directives) + else: + directives.extend([ + 'include meta/*.yml', + 'include *.txt *.md *.rst COPYING LICENSE', + 'recursive-include tests **', + 'recursive-include docs **.rst **.yml **.yaml **.json **.j2 **.txt', + 'recursive-include roles **.yml **.yaml **.json **.j2', + 'recursive-include playbooks **.yml **.yaml **.json', + 'recursive-include changelogs **.yml **.yaml', + 'recursive-include plugins */**.py', + ]) + + plugins = set(l.package.split('.')[-1] for d, l in get_all_plugin_loaders()) + for plugin in sorted(plugins): + if plugin in ('modules', 'module_utils'): + continue + elif plugin in C.DOCUMENTABLE_PLUGINS: + directives.append( + f'recursive-include plugins/{plugin} **.yml **.yaml' + ) + + directives.extend([ + 'recursive-include plugins/modules **.ps1 **.yml **.yaml', + 'recursive-include plugins/module_utils **.ps1 **.psm1 **.cs', + ]) + + directives.extend(control.directives) + + directives.extend([ + f'exclude galaxy.yml galaxy.yaml MANIFEST.json FILES.json {namespace}-{name}-*.tar.gz', + 'recursive-exclude tests/output **', + 'global-exclude /.* /__pycache__', + ]) + + display.vvv('Manifest Directives:') + display.vvv(textwrap.indent('\n'.join(directives), ' ')) + + u_collection_path = to_text(b_collection_path, errors='surrogate_or_strict') + m = Manifest(u_collection_path) + for directive in directives: + try: + m.process_directive(directive) + except DistlibException as e: + raise AnsibleError(f'Invalid manifest directive: {e}') + except Exception as e: + raise AnsibleError(f'Unknown error processing manifest directive: {e}') + + manifest = _make_manifest() + + for abs_path in m.sorted(wantdirs=True): + rel_path = os.path.relpath(abs_path, u_collection_path) + if os.path.isdir(abs_path): + manifest_entry = _make_entry(rel_path, 'dir') + else: + manifest_entry = _make_entry( + rel_path, + 'file', + chksum_type='sha256', + chksum=secure_hash(abs_path, hash_func=sha256) + ) + + manifest['files'].append(manifest_entry) + + return manifest + + +def _build_files_manifest_walk(b_collection_path, namespace, name, ignore_patterns): + # type: (bytes, str, str, list[str]) -> FilesManifestType + # We always ignore .pyc and .retry files as well as some well known version control directories. The ignore + # patterns can be extended by the build_ignore key in galaxy.yml + b_ignore_patterns = [ + b'MANIFEST.json', + b'FILES.json', + b'galaxy.yml', + b'galaxy.yaml', + b'.git', + b'*.pyc', + b'*.retry', + b'tests/output', # Ignore ansible-test result output directory. + to_bytes('{0}-{1}-*.tar.gz'.format(namespace, name)), # Ignores previously built artifacts in the root dir. 
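+ # NOTE: The patterns above are matched with fnmatch against paths relative to the collection root.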
+ ]
+ b_ignore_patterns += [to_bytes(p) for p in ignore_patterns]
+ b_ignore_dirs = frozenset([b'CVS', b'.bzr', b'.hg', b'.git', b'.svn', b'__pycache__', b'.tox'])
+
+ manifest = _make_manifest()
+
+ def _walk(b_path, b_top_level_dir):
+ for b_item in os.listdir(b_path):
+ b_abs_path = os.path.join(b_path, b_item)
+ b_rel_base_dir = b'' if b_path == b_top_level_dir else b_path[len(b_top_level_dir) + 1:]
+ b_rel_path = os.path.join(b_rel_base_dir, b_item)
+ rel_path = to_text(b_rel_path, errors='surrogate_or_strict')
+
+ if os.path.isdir(b_abs_path):
+ if b_item in b_ignore_dirs or \
+ any(fnmatch.fnmatch(b_rel_path, b_pattern) for b_pattern in b_ignore_patterns):
+ display.vvv("Skipping '%s' for collection build" % to_text(b_abs_path))
+ continue
+
+ if os.path.islink(b_abs_path):
+ b_link_target = os.path.realpath(b_abs_path)
+
+ if not _is_child_path(b_link_target, b_top_level_dir):
+ display.warning("Skipping '%s' as it is a symbolic link to a directory outside the collection"
+ % to_text(b_abs_path))
+ continue
+
+ manifest['files'].append(_make_entry(rel_path, 'dir'))
+
+ if not os.path.islink(b_abs_path):
+ _walk(b_abs_path, b_top_level_dir)
+ else:
+ if any(fnmatch.fnmatch(b_rel_path, b_pattern) for b_pattern in b_ignore_patterns):
+ display.vvv("Skipping '%s' for collection build" % to_text(b_abs_path))
+ continue
+
+ # Handling of file symlinks occurs in _build_collection_tar; the manifest entry for a symlink is the
+ # same as for a normal file.
+ manifest['files'].append(
+ _make_entry(
+ rel_path,
+ 'file',
+ chksum_type='sha256',
+ chksum=secure_hash(b_abs_path, hash_func=sha256)
+ )
+ )
+
+ _walk(b_collection_path, b_collection_path)
+
+ return manifest
+
+
+# FIXME: accept a dict produced from `galaxy.yml` instead of separate args
+def _build_manifest(namespace, name, version, authors, readme, tags, description, license_file,
+ dependencies, repository, documentation, homepage, issues, **kwargs):
+ manifest = {
+ 'collection_info': {
+ 'namespace': namespace,
+ 'name': name,
+ 'version': version,
+ 'authors': authors,
+ 'readme': readme,
+ 'tags': tags,
+ 'description': description,
+ 'license': kwargs['license'],
+ 'license_file': license_file or None, # Normalize an empty string in galaxy.yml to None
+ 'dependencies': dependencies,
+ 'repository': repository,
+ 'documentation': documentation,
+ 'homepage': homepage,
+ 'issues': issues,
+ },
+ 'file_manifest_file': {
+ 'name': 'FILES.json',
+ 'ftype': 'file',
+ 'chksum_type': 'sha256',
+ 'chksum_sha256': None, # Filled out in _build_collection_tar
+ 'format': MANIFEST_FORMAT
+ },
+ 'format': MANIFEST_FORMAT,
+ }
+
+ return manifest
+
+
+def _build_collection_tar(
+ b_collection_path, # type: bytes
+ b_tar_path, # type: bytes
+ collection_manifest, # type: CollectionManifestType
+ file_manifest, # type: FilesManifestType
+): # type: (...)
-> str + """Build a tar.gz collection artifact from the manifest data.""" + files_manifest_json = to_bytes(json.dumps(file_manifest, indent=True), errors='surrogate_or_strict') + collection_manifest['file_manifest_file']['chksum_sha256'] = secure_hash_s(files_manifest_json, hash_func=sha256) + collection_manifest_json = to_bytes(json.dumps(collection_manifest, indent=True), errors='surrogate_or_strict') + + with _tempdir() as b_temp_path: + b_tar_filepath = os.path.join(b_temp_path, os.path.basename(b_tar_path)) + + with tarfile.open(b_tar_filepath, mode='w:gz') as tar_file: + # Add the MANIFEST.json and FILES.json file to the archive + for name, b in [(MANIFEST_FILENAME, collection_manifest_json), ('FILES.json', files_manifest_json)]: + b_io = BytesIO(b) + tar_info = tarfile.TarInfo(name) + tar_info.size = len(b) + tar_info.mtime = int(time.time()) + tar_info.mode = 0o0644 + tar_file.addfile(tarinfo=tar_info, fileobj=b_io) + + for file_info in file_manifest['files']: # type: ignore[union-attr] + if file_info['name'] == '.': + continue + + # arcname expects a native string, cannot be bytes + filename = to_native(file_info['name'], errors='surrogate_or_strict') + b_src_path = os.path.join(b_collection_path, to_bytes(filename, errors='surrogate_or_strict')) + + def reset_stat(tarinfo): + if tarinfo.type != tarfile.SYMTYPE: + existing_is_exec = tarinfo.mode & stat.S_IXUSR + tarinfo.mode = 0o0755 if existing_is_exec or tarinfo.isdir() else 0o0644 + tarinfo.uid = tarinfo.gid = 0 + tarinfo.uname = tarinfo.gname = '' + + return tarinfo + + if os.path.islink(b_src_path): + b_link_target = os.path.realpath(b_src_path) + if _is_child_path(b_link_target, b_collection_path): + b_rel_path = os.path.relpath(b_link_target, start=os.path.dirname(b_src_path)) + + tar_info = tarfile.TarInfo(filename) + tar_info.type = tarfile.SYMTYPE + tar_info.linkname = to_native(b_rel_path, errors='surrogate_or_strict') + tar_info = reset_stat(tar_info) + tar_file.addfile(tarinfo=tar_info) + + continue + + # Dealing with a normal file, just add it by name. + tar_file.add( + to_native(os.path.realpath(b_src_path)), + arcname=filename, + recursive=False, + filter=reset_stat, + ) + + shutil.copy(to_native(b_tar_filepath), to_native(b_tar_path)) + collection_name = "%s.%s" % (collection_manifest['collection_info']['namespace'], + collection_manifest['collection_info']['name']) + tar_path = to_text(b_tar_path) + display.display(u'Created collection for %s at %s' % (collection_name, tar_path)) + return tar_path + + +def _build_collection_dir(b_collection_path, b_collection_output, collection_manifest, file_manifest): + """Build a collection directory from the manifest data. + + This should follow the same pattern as _build_collection_tar. 
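+
+ :param b_collection_path: The path to the collection source directory.
+ :param b_collection_output: The directory to create the collection layout in.
+ :param collection_manifest: The MANIFEST.json data to write.
+ :param file_manifest: The FILES.json data to write.
+ :return: The installation directory as a text string.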
+ """ + os.makedirs(b_collection_output, mode=0o0755) + + files_manifest_json = to_bytes(json.dumps(file_manifest, indent=True), errors='surrogate_or_strict') + collection_manifest['file_manifest_file']['chksum_sha256'] = secure_hash_s(files_manifest_json, hash_func=sha256) + collection_manifest_json = to_bytes(json.dumps(collection_manifest, indent=True), errors='surrogate_or_strict') + + # Write contents to the files + for name, b in [(MANIFEST_FILENAME, collection_manifest_json), ('FILES.json', files_manifest_json)]: + b_path = os.path.join(b_collection_output, to_bytes(name, errors='surrogate_or_strict')) + with open(b_path, 'wb') as file_obj, BytesIO(b) as b_io: + shutil.copyfileobj(b_io, file_obj) + + os.chmod(b_path, 0o0644) + + base_directories = [] + for file_info in sorted(file_manifest['files'], key=lambda x: x['name']): + if file_info['name'] == '.': + continue + + src_file = os.path.join(b_collection_path, to_bytes(file_info['name'], errors='surrogate_or_strict')) + dest_file = os.path.join(b_collection_output, to_bytes(file_info['name'], errors='surrogate_or_strict')) + + existing_is_exec = os.stat(src_file).st_mode & stat.S_IXUSR + mode = 0o0755 if existing_is_exec else 0o0644 + + if os.path.isdir(src_file): + mode = 0o0755 + base_directories.append(src_file) + os.mkdir(dest_file, mode) + else: + shutil.copyfile(src_file, dest_file) + + os.chmod(dest_file, mode) + collection_output = to_text(b_collection_output) + return collection_output + + +def find_existing_collections(path, artifacts_manager): + """Locate all collections under a given path. + + :param path: Collection dirs layout search path. + :param artifacts_manager: Artifacts manager. + """ + b_path = to_bytes(path, errors='surrogate_or_strict') + + # FIXME: consider using `glob.glob()` to simplify looping + for b_namespace in os.listdir(b_path): + b_namespace_path = os.path.join(b_path, b_namespace) + if os.path.isfile(b_namespace_path): + continue + + # FIXME: consider feeding b_namespace_path to Candidate.from_dir_path to get subdirs automatically + for b_collection in os.listdir(b_namespace_path): + b_collection_path = os.path.join(b_namespace_path, b_collection) + if not os.path.isdir(b_collection_path): + continue + + try: + req = Candidate.from_dir_path_as_unknown(b_collection_path, artifacts_manager) + except ValueError as val_err: + raise_from(AnsibleError(val_err), val_err) + + display.vvv( + u"Found installed collection {coll!s} at '{path!s}'". + format(coll=to_text(req), path=to_text(req.src)) + ) + yield req + + +def install(collection, path, artifacts_manager): # FIXME: mv to dataclasses? + # type: (Candidate, str, ConcreteArtifactsManager) -> None + """Install a collection under a given path. + + :param collection: Collection to be installed. + :param path: Collection dirs layout path. + :param artifacts_manager: Artifacts manager. + """ + b_artifact_path = ( + artifacts_manager.get_artifact_path if collection.is_concrete_artifact + else artifacts_manager.get_galaxy_artifact_path + )(collection) + + collection_path = os.path.join(path, collection.namespace, collection.name) + b_collection_path = to_bytes(collection_path, errors='surrogate_or_strict') + display.display( + u"Installing '{coll!s}' to '{path!s}'". 
+ format(coll=to_text(collection), path=collection_path), + ) + + if os.path.exists(b_collection_path): + shutil.rmtree(b_collection_path) + + if collection.is_dir: + install_src(collection, b_artifact_path, b_collection_path, artifacts_manager) + else: + install_artifact( + b_artifact_path, + b_collection_path, + artifacts_manager._b_working_directory, + collection.signatures, + artifacts_manager.keyring, + artifacts_manager.required_successful_signature_count, + artifacts_manager.ignore_signature_errors, + ) + if (collection.is_online_index_pointer and isinstance(collection.src, GalaxyAPI)): + write_source_metadata( + collection, + b_collection_path, + artifacts_manager + ) + + display.display( + '{coll!s} was installed successfully'. + format(coll=to_text(collection)), + ) + + +def write_source_metadata(collection, b_collection_path, artifacts_manager): + # type: (Candidate, bytes, ConcreteArtifactsManager) -> None + source_data = artifacts_manager.get_galaxy_artifact_source_info(collection) + + b_yaml_source_data = to_bytes(yaml_dump(source_data), errors='surrogate_or_strict') + b_info_dest = collection.construct_galaxy_info_path(b_collection_path) + b_info_dir = os.path.split(b_info_dest)[0] + + if os.path.exists(b_info_dir): + shutil.rmtree(b_info_dir) + + try: + os.mkdir(b_info_dir, mode=0o0755) + with open(b_info_dest, mode='w+b') as fd: + fd.write(b_yaml_source_data) + os.chmod(b_info_dest, 0o0644) + except Exception: + # Ensure we don't leave the dir behind in case of a failure. + if os.path.isdir(b_info_dir): + shutil.rmtree(b_info_dir) + raise + + +def verify_artifact_manifest(manifest_file, signatures, keyring, required_signature_count, ignore_signature_errors): + # type: (str, list[str], str, str, list[str]) -> None + failed_verify = False + coll_path_parts = to_text(manifest_file, errors='surrogate_or_strict').split(os.path.sep) + collection_name = '%s.%s' % (coll_path_parts[-3], coll_path_parts[-2]) # get 'ns' and 'coll' from /path/to/ns/coll/MANIFEST.json + if not verify_file_signatures(collection_name, manifest_file, signatures, keyring, required_signature_count, ignore_signature_errors): + raise AnsibleError(f"Not installing {collection_name} because GnuPG signature verification failed.") + display.vvvv(f"GnuPG signature verification succeeded for {collection_name}") + + +def install_artifact(b_coll_targz_path, b_collection_path, b_temp_path, signatures, keyring, required_signature_count, ignore_signature_errors): + """Install a collection from tarball under a given path. + + :param b_coll_targz_path: Collection tarball to be installed. + :param b_collection_path: Collection dirs layout path. + :param b_temp_path: Temporary dir path. 
+ :param signatures: frozenset of signatures to verify the MANIFEST.json + :param keyring: The keyring used during GPG verification + :param required_signature_count: The number of signatures that must successfully verify the collection + :param ignore_signature_errors: GPG errors to ignore during signature verification + """ + try: + with tarfile.open(b_coll_targz_path, mode='r') as collection_tar: + # Verify the signature on the MANIFEST.json before extracting anything else + _extract_tar_file(collection_tar, MANIFEST_FILENAME, b_collection_path, b_temp_path) + + if keyring is not None: + manifest_file = os.path.join(to_text(b_collection_path, errors='surrogate_or_strict'), MANIFEST_FILENAME) + verify_artifact_manifest(manifest_file, signatures, keyring, required_signature_count, ignore_signature_errors) + + files_member_obj = collection_tar.getmember('FILES.json') + with _tarfile_extract(collection_tar, files_member_obj) as (dummy, files_obj): + files = json.loads(to_text(files_obj.read(), errors='surrogate_or_strict')) + + _extract_tar_file(collection_tar, 'FILES.json', b_collection_path, b_temp_path) + + for file_info in files['files']: + file_name = file_info['name'] + if file_name == '.': + continue + + if file_info['ftype'] == 'file': + _extract_tar_file(collection_tar, file_name, b_collection_path, b_temp_path, + expected_hash=file_info['chksum_sha256']) + + else: + _extract_tar_dir(collection_tar, file_name, b_collection_path) + + except Exception: + # Ensure we don't leave the dir behind in case of a failure. + shutil.rmtree(b_collection_path) + + b_namespace_path = os.path.dirname(b_collection_path) + if not os.listdir(b_namespace_path): + os.rmdir(b_namespace_path) + + raise + + +def install_src(collection, b_collection_path, b_collection_output_path, artifacts_manager): + r"""Install the collection from source control into given dir. + + Generates the Ansible collection artifact data from a galaxy.yml and + installs the artifact to a directory. + This should follow the same pattern as build_collection, but instead + of creating an artifact, install it. + + :param collection: Collection to be installed. + :param b_collection_path: Collection dirs layout path. + :param b_collection_output_path: The installation directory for the \ + collection artifact. + :param artifacts_manager: Artifacts manager. + + :raises AnsibleError: If no collection metadata found. + """ + collection_meta = artifacts_manager.get_direct_collection_meta(collection) + + if 'build_ignore' not in collection_meta: # installed collection, not src + # FIXME: optimize this? use a different process? copy instead of build? + collection_meta['build_ignore'] = [] + collection_meta['manifest'] = Sentinel + collection_manifest = _build_manifest(**collection_meta) + file_manifest = _build_files_manifest( + b_collection_path, + collection_meta['namespace'], collection_meta['name'], + collection_meta['build_ignore'], + collection_meta['manifest'], + ) + + collection_output_path = _build_collection_dir( + b_collection_path, b_collection_output_path, + collection_manifest, file_manifest, + ) + + display.display( + 'Created collection for {coll!s} at {path!s}'. + format(coll=collection, path=collection_output_path) + ) + + +def _extract_tar_dir(tar, dirname, b_dest): + """ Extracts a directory from a collection tar. 
""" + member_names = [to_native(dirname, errors='surrogate_or_strict')] + + # Create list of members with and without trailing separator + if not member_names[-1].endswith(os.path.sep): + member_names.append(member_names[-1] + os.path.sep) + + # Try all of the member names and stop on the first one that are able to successfully get + for member in member_names: + try: + tar_member = tar.getmember(member) + except KeyError: + continue + break + else: + # If we still can't find the member, raise a nice error. + raise AnsibleError("Unable to extract '%s' from collection" % to_native(member, errors='surrogate_or_strict')) + + b_dir_path = os.path.join(b_dest, to_bytes(dirname, errors='surrogate_or_strict')) + + b_parent_path = os.path.dirname(b_dir_path) + try: + os.makedirs(b_parent_path, mode=0o0755) + except OSError as e: + if e.errno != errno.EEXIST: + raise + + if tar_member.type == tarfile.SYMTYPE: + b_link_path = to_bytes(tar_member.linkname, errors='surrogate_or_strict') + if not _is_child_path(b_link_path, b_dest, link_name=b_dir_path): + raise AnsibleError("Cannot extract symlink '%s' in collection: path points to location outside of " + "collection '%s'" % (to_native(dirname), b_link_path)) + + os.symlink(b_link_path, b_dir_path) + + else: + if not os.path.isdir(b_dir_path): + os.mkdir(b_dir_path, 0o0755) + + +def _extract_tar_file(tar, filename, b_dest, b_temp_path, expected_hash=None): + """ Extracts a file from a collection tar. """ + with _get_tar_file_member(tar, filename) as (tar_member, tar_obj): + if tar_member.type == tarfile.SYMTYPE: + actual_hash = _consume_file(tar_obj) + + else: + with tempfile.NamedTemporaryFile(dir=b_temp_path, delete=False) as tmpfile_obj: + actual_hash = _consume_file(tar_obj, tmpfile_obj) + + if expected_hash and actual_hash != expected_hash: + raise AnsibleError("Checksum mismatch for '%s' inside collection at '%s'" + % (to_native(filename, errors='surrogate_or_strict'), to_native(tar.name))) + + b_dest_filepath = os.path.abspath(os.path.join(b_dest, to_bytes(filename, errors='surrogate_or_strict'))) + b_parent_dir = os.path.dirname(b_dest_filepath) + if not _is_child_path(b_parent_dir, b_dest): + raise AnsibleError("Cannot extract tar entry '%s' as it will be placed outside the collection directory" + % to_native(filename, errors='surrogate_or_strict')) + + if not os.path.exists(b_parent_dir): + # Seems like Galaxy does not validate if all file entries have a corresponding dir ftype entry. This check + # makes sure we create the parent directory even if it wasn't set in the metadata. + os.makedirs(b_parent_dir, mode=0o0755) + + if tar_member.type == tarfile.SYMTYPE: + b_link_path = to_bytes(tar_member.linkname, errors='surrogate_or_strict') + if not _is_child_path(b_link_path, b_dest, link_name=b_dest_filepath): + raise AnsibleError("Cannot extract symlink '%s' in collection: path points to location outside of " + "collection '%s'" % (to_native(filename), b_link_path)) + + os.symlink(b_link_path, b_dest_filepath) + + else: + shutil.move(to_bytes(tmpfile_obj.name, errors='surrogate_or_strict'), b_dest_filepath) + + # Default to rw-r--r-- and only add execute if the tar file has execute. 
+ tar_member = tar.getmember(to_native(filename, errors='surrogate_or_strict')) + new_mode = 0o644 + if stat.S_IMODE(tar_member.mode) & stat.S_IXUSR: + new_mode |= 0o0111 + + os.chmod(b_dest_filepath, new_mode) + + +def _get_tar_file_member(tar, filename): + n_filename = to_native(filename, errors='surrogate_or_strict') + try: + member = tar.getmember(n_filename) + except KeyError: + raise AnsibleError("Collection tar at '%s' does not contain the expected file '%s'." % ( + to_native(tar.name), + n_filename)) + + return _tarfile_extract(tar, member) + + +def _get_json_from_tar_file(b_path, filename): + file_contents = '' + + with tarfile.open(b_path, mode='r') as collection_tar: + with _get_tar_file_member(collection_tar, filename) as (dummy, tar_obj): + bufsize = 65536 + data = tar_obj.read(bufsize) + while data: + file_contents += to_text(data) + data = tar_obj.read(bufsize) + + return json.loads(file_contents) + + +def _get_tar_file_hash(b_path, filename): + with tarfile.open(b_path, mode='r') as collection_tar: + with _get_tar_file_member(collection_tar, filename) as (dummy, tar_obj): + return _consume_file(tar_obj) + + +def _get_file_hash(b_path, filename): # type: (bytes, str) -> str + filepath = os.path.join(b_path, to_bytes(filename, errors='surrogate_or_strict')) + with open(filepath, 'rb') as fp: + return _consume_file(fp) + + +def _is_child_path(path, parent_path, link_name=None): + """ Checks that path is a path within the parent_path specified. """ + b_path = to_bytes(path, errors='surrogate_or_strict') + + if link_name and not os.path.isabs(b_path): + # If link_name is specified, path is the source of the link and we need to resolve the absolute path. + b_link_dir = os.path.dirname(to_bytes(link_name, errors='surrogate_or_strict')) + b_path = os.path.abspath(os.path.join(b_link_dir, b_path)) + + b_parent_path = to_bytes(parent_path, errors='surrogate_or_strict') + return b_path == b_parent_path or b_path.startswith(b_parent_path + to_bytes(os.path.sep)) + + +def _resolve_depenency_map( + requested_requirements, # type: t.Iterable[Requirement] + galaxy_apis, # type: t.Iterable[GalaxyAPI] + concrete_artifacts_manager, # type: ConcreteArtifactsManager + preferred_candidates, # type: t.Iterable[Candidate] | None + no_deps, # type: bool + allow_pre_release, # type: bool + upgrade, # type: bool + include_signatures, # type: bool + offline, # type: bool +): # type: (...) 
-> dict[str, Candidate] + """Return the resolved dependency map.""" + if not HAS_RESOLVELIB: + raise AnsibleError("Failed to import resolvelib, check that a supported version is installed") + if not HAS_PACKAGING: + raise AnsibleError("Failed to import packaging, check that a supported version is installed") + + req = None + + try: + dist = distribution('ansible-core') + except Exception: + pass + else: + req = next((rr for r in (dist.requires or []) if (rr := PkgReq(r)).name == 'resolvelib'), None) + finally: + if req is None: + # TODO: replace the hardcoded versions with a warning if the dist info is missing + # display.warning("Unable to find 'ansible-core' distribution requirements to verify the resolvelib version is supported.") + if not RESOLVELIB_LOWERBOUND <= RESOLVELIB_VERSION < RESOLVELIB_UPPERBOUND: + raise AnsibleError( + f"ansible-galaxy requires resolvelib<{RESOLVELIB_UPPERBOUND.vstring},>={RESOLVELIB_LOWERBOUND.vstring}" + ) + elif not req.specifier.contains(RESOLVELIB_VERSION.vstring): + raise AnsibleError(f"ansible-galaxy requires {req.name}{req.specifier}") + + collection_dep_resolver = build_collection_dependency_resolver( + galaxy_apis=galaxy_apis, + concrete_artifacts_manager=concrete_artifacts_manager, + user_requirements=requested_requirements, + preferred_candidates=preferred_candidates, + with_deps=not no_deps, + with_pre_releases=allow_pre_release, + upgrade=upgrade, + include_signatures=include_signatures, + offline=offline, + ) + try: + return collection_dep_resolver.resolve( + requested_requirements, + max_rounds=2000000, # NOTE: same constant pip uses + ).mapping + except CollectionDependencyResolutionImpossible as dep_exc: + conflict_causes = ( + '* {req.fqcn!s}:{req.ver!s} ({dep_origin!s})'.format( + req=req_inf.requirement, + dep_origin='direct request' + if req_inf.parent is None + else 'dependency of {parent!s}'. + format(parent=req_inf.parent), + ) + for req_inf in dep_exc.causes + ) + error_msg_lines = list(chain( + ( + 'Failed to resolve the requested ' + 'dependencies map. Could not satisfy the following ' + 'requirements:', + ), + conflict_causes, + )) + raise raise_from( # NOTE: Leading "raise" is a hack for mypy bug #9717 + AnsibleError('\n'.join(error_msg_lines)), + dep_exc, + ) + except CollectionDependencyInconsistentCandidate as dep_exc: + parents = [ + "%s.%s:%s" % (p.namespace, p.name, p.ver) + for p in dep_exc.criterion.iter_parent() + if p is not None + ] + + error_msg_lines = [ + ( + 'Failed to resolve the requested dependencies map. ' + 'Got the candidate {req.fqcn!s}:{req.ver!s} ({dep_origin!s}) ' + 'which didn\'t satisfy all of the following requirements:'. + format( + req=dep_exc.candidate, + dep_origin='direct request' + if not parents else 'dependency of {parent!s}'. 
+ format(parent=', '.join(parents)) + ) + ) + ] + + for req in dep_exc.criterion.iter_requirement(): + error_msg_lines.append( + '* {req.fqcn!s}:{req.ver!s}'.format(req=req) + ) + + raise raise_from( # NOTE: Leading "raise" is a hack for mypy bug #9717 + AnsibleError('\n'.join(error_msg_lines)), + dep_exc, + ) + except ValueError as exc: + raise AnsibleError(to_native(exc)) from exc diff --git a/lib/ansible/galaxy/collection/concrete_artifact_manager.py b/lib/ansible/galaxy/collection/concrete_artifact_manager.py new file mode 100644 index 0000000..7c920b8 --- /dev/null +++ b/lib/ansible/galaxy/collection/concrete_artifact_manager.py @@ -0,0 +1,755 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2020-2021, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +"""Concrete collection candidate management helper module.""" + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import os +import tarfile +import subprocess +import typing as t + +from contextlib import contextmanager +from hashlib import sha256 +from urllib.error import URLError +from urllib.parse import urldefrag +from shutil import rmtree +from tempfile import mkdtemp + +if t.TYPE_CHECKING: + from ansible.galaxy.dependency_resolution.dataclasses import ( + Candidate, Requirement, + ) + from ansible.galaxy.token import GalaxyToken + +from ansible.errors import AnsibleError +from ansible.galaxy import get_collections_galaxy_meta_info +from ansible.galaxy.dependency_resolution.dataclasses import _GALAXY_YAML +from ansible.galaxy.user_agent import user_agent +from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.module_utils.common.process import get_bin_path +from ansible.module_utils.common._collections_compat import MutableMapping +from ansible.module_utils.common.yaml import yaml_load +from ansible.module_utils.six import raise_from +from ansible.module_utils.urls import open_url +from ansible.utils.display import Display +from ansible.utils.sentinel import Sentinel + +import yaml + + +display = Display() + +MANIFEST_FILENAME = 'MANIFEST.json' + + +class ConcreteArtifactsManager: + """Manager for on-disk collection artifacts. 
+
+ It is responsible for:
+ * downloading remote collections from Galaxy-compatible servers and
+ direct links to tarballs or SCM repositories
+ * keeping track of local ones
+ * keeping track of Galaxy API tokens for downloads from Galaxy'ish
+ servers, as well as the artifact hashes
+ * keeping track of Galaxy API signatures for downloads from Galaxy'ish
+ servers
+ * caching all of the above
+ * retrieving the metadata out of the downloaded artifacts
+ """
+ def __init__(self, b_working_directory, validate_certs=True, keyring=None, timeout=60, required_signature_count=None, ignore_signature_errors=None):
+ # type: (bytes, bool, str, int, str, list[str]) -> None
+ """Initialize ConcreteArtifactsManager caches and constraints."""
+ self._validate_certs = validate_certs # type: bool
+ self._artifact_cache = {} # type: dict[bytes, bytes]
+ self._galaxy_artifact_cache = {} # type: dict[Candidate | Requirement, bytes]
+ self._artifact_meta_cache = {} # type: dict[bytes, dict[str, str | list[str] | dict[str, str] | None | t.Type[Sentinel]]]
+ self._galaxy_collection_cache = {} # type: dict[Candidate | Requirement, tuple[str, str, GalaxyToken]]
+ self._galaxy_collection_origin_cache = {} # type: dict[Candidate, tuple[str, list[dict[str, str]]]]
+ self._b_working_directory = b_working_directory # type: bytes
+ self._supplemental_signature_cache = {} # type: dict[str, str]
+ self._keyring = keyring # type: str
+ self.timeout = timeout # type: int
+ self._required_signature_count = required_signature_count # type: str
+ self._ignore_signature_errors = ignore_signature_errors # type: list[str]
+ self._require_build_metadata = True # type: bool
+
+ @property
+ def keyring(self):
+ return self._keyring
+
+ @property
+ def required_successful_signature_count(self):
+ return self._required_signature_count
+
+ @property
+ def ignore_signature_errors(self):
+ if self._ignore_signature_errors is None:
+ return []
+ return self._ignore_signature_errors
+
+ @property
+ def require_build_metadata(self):
+ # type: () -> bool
+ return self._require_build_metadata
+
+ @require_build_metadata.setter
+ def require_build_metadata(self, value):
+ # type: (bool) -> None
+ self._require_build_metadata = value
+
+ def get_galaxy_artifact_source_info(self, collection):
+ # type: (Candidate) -> dict[str, t.Union[str, list[dict[str, str]]]]
+ server = collection.src.api_server
+
+ try:
+ download_url = self._galaxy_collection_cache[collection][0]
+ signatures_url, signatures = self._galaxy_collection_origin_cache[collection]
+ except KeyError as key_err:
+ raise RuntimeError(
+ 'There is no known source for {coll!s}'.
+ format(coll=collection),
+ ) from key_err
+
+ return {
+ "format_version": "1.0.0",
+ "namespace": collection.namespace,
+ "name": collection.name,
+ "version": collection.ver,
+ "server": server,
+ "version_url": signatures_url,
+ "download_url": download_url,
+ "signatures": signatures,
+ }
+
+ def get_galaxy_artifact_path(self, collection):
+ # type: (t.Union[Candidate, Requirement]) -> bytes
+ """Given a Galaxy-stored collection, return a cached path.
+
+ If it's not yet on disk, this method downloads the artifact first.
+ """
+ try:
+ return self._galaxy_artifact_cache[collection]
+ except KeyError:
+ pass
+
+ try:
+ url, sha256_hash, token = self._galaxy_collection_cache[collection]
+ except KeyError as key_err:
+ raise_from(
+ RuntimeError(
+ 'There is no known source for {coll!s}'.
+ format(coll=collection), + ), + key_err, + ) + + display.vvvv( + "Fetching a collection tarball for '{collection!s}' from " + 'Ansible Galaxy'.format(collection=collection), + ) + + try: + b_artifact_path = _download_file( + url, + self._b_working_directory, + expected_hash=sha256_hash, + validate_certs=self._validate_certs, + token=token, + ) # type: bytes + except URLError as err: + raise_from( + AnsibleError( + 'Failed to download collection tar ' + "from '{coll_src!s}': {download_err!s}". + format( + coll_src=to_native(collection.src), + download_err=to_native(err), + ), + ), + err, + ) + else: + display.vvv( + "Collection '{coll!s}' obtained from " + 'server {server!s} {url!s}'.format( + coll=collection, server=collection.src or 'Galaxy', + url=collection.src.api_server if collection.src is not None + else '', + ) + ) + + self._galaxy_artifact_cache[collection] = b_artifact_path + return b_artifact_path + + def get_artifact_path(self, collection): + # type: (t.Union[Candidate, Requirement]) -> bytes + """Given a concrete collection pointer, return a cached path. + + If it's not yet on disk, this method downloads the artifact first. + """ + try: + return self._artifact_cache[collection.src] + except KeyError: + pass + + # NOTE: SCM needs to be special-cased as it may contain either + # NOTE: one collection in its root, or a number of top-level + # NOTE: collection directories instead. + # NOTE: The idea is to store the SCM collection as unpacked + # NOTE: directory structure under the temporary location and use + # NOTE: a "virtual" collection that has pinned requirements on + # NOTE: the directories under that SCM checkout that correspond + # NOTE: to collections. + # NOTE: This brings us to the idea that we need two separate + # NOTE: virtual Requirement/Candidate types -- + # NOTE: (single) dir + (multidir) subdirs + if collection.is_url: + display.vvvv( + "Collection requirement '{collection!s}' is a URL " + 'to a tar artifact'.format(collection=collection.fqcn), + ) + try: + b_artifact_path = _download_file( + collection.src, + self._b_working_directory, + expected_hash=None, # NOTE: URLs don't support checksums + validate_certs=self._validate_certs, + timeout=self.timeout + ) + except Exception as err: + raise_from( + AnsibleError( + 'Failed to download collection tar ' + "from '{coll_src!s}': {download_err!s}". + format( + coll_src=to_native(collection.src), + download_err=to_native(err), + ), + ), + err, + ) + elif collection.is_scm: + b_artifact_path = _extract_collection_from_git( + collection.src, + collection.ver, + self._b_working_directory, + ) + elif collection.is_file or collection.is_dir or collection.is_subdirs: + b_artifact_path = to_bytes(collection.src) + else: + # NOTE: This may happen `if collection.is_online_index_pointer` + raise RuntimeError( + 'The artifact is of an unexpected type {art_type!s}'. + format(art_type=collection.type) + ) + + self._artifact_cache[collection.src] = b_artifact_path + return b_artifact_path + + def _get_direct_collection_namespace(self, collection): + # type: (Candidate) -> t.Optional[str] + return self.get_direct_collection_meta(collection)['namespace'] # type: ignore[return-value] + + def _get_direct_collection_name(self, collection): + # type: (Candidate) -> t.Optional[str] + return self.get_direct_collection_meta(collection)['name'] # type: ignore[return-value] + + def get_direct_collection_fqcn(self, collection): + # type: (Candidate) -> t.Optional[str] + """Extract FQCN from the given on-disk collection artifact. 
+ + If the collection is virtual, ``None`` is returned instead + of a string. + """ + if collection.is_virtual: + # NOTE: should it be something like "<virtual>"? + return None + + return '.'.join(( # type: ignore[type-var] + self._get_direct_collection_namespace(collection), # type: ignore[arg-type] + self._get_direct_collection_name(collection), + )) + + def get_direct_collection_version(self, collection): + # type: (t.Union[Candidate, Requirement]) -> str + """Extract version from the given on-disk collection artifact.""" + return self.get_direct_collection_meta(collection)['version'] # type: ignore[return-value] + + def get_direct_collection_dependencies(self, collection): + # type: (t.Union[Candidate, Requirement]) -> dict[str, str] + """Extract deps from the given on-disk collection artifact.""" + collection_dependencies = self.get_direct_collection_meta(collection)['dependencies'] + if collection_dependencies is None: + collection_dependencies = {} + return collection_dependencies # type: ignore[return-value] + + def get_direct_collection_meta(self, collection): + # type: (t.Union[Candidate, Requirement]) -> dict[str, t.Union[str, dict[str, str], list[str], None, t.Type[Sentinel]]] + """Extract meta from the given on-disk collection artifact.""" + try: # FIXME: use unique collection identifier as a cache key? + return self._artifact_meta_cache[collection.src] + except KeyError: + b_artifact_path = self.get_artifact_path(collection) + + if collection.is_url or collection.is_file: + collection_meta = _get_meta_from_tar(b_artifact_path) + elif collection.is_dir: # should we just build a coll instead? + # FIXME: what if there's subdirs? + try: + collection_meta = _get_meta_from_dir(b_artifact_path, self.require_build_metadata) + except LookupError as lookup_err: + raise_from( + AnsibleError( + 'Failed to find the collection dir deps: {err!s}'. + format(err=to_native(lookup_err)), + ), + lookup_err, + ) + elif collection.is_scm: + collection_meta = { + 'name': None, + 'namespace': None, + 'dependencies': {to_native(b_artifact_path): '*'}, + 'version': '*', + } + elif collection.is_subdirs: + collection_meta = { + 'name': None, + 'namespace': None, + # NOTE: Dropping b_artifact_path since it's based on src anyway + 'dependencies': dict.fromkeys( + map(to_native, collection.namespace_collection_paths), + '*', + ), + 'version': '*', + } + else: + raise RuntimeError + + self._artifact_meta_cache[collection.src] = collection_meta + return collection_meta + + def save_collection_source(self, collection, url, sha256_hash, token, signatures_url, signatures): + # type: (Candidate, str, str, GalaxyToken, str, list[dict[str, str]]) -> None + """Store collection URL, SHA256 hash and Galaxy API token. + + This is a hook that is supposed to be called before attempting to + download Galaxy-based collections with ``get_galaxy_artifact_path()``. + """ + self._galaxy_collection_cache[collection] = url, sha256_hash, token + self._galaxy_collection_origin_cache[collection] = signatures_url, signatures + + @classmethod + @contextmanager + def under_tmpdir( + cls, + temp_dir_base, # type: str + validate_certs=True, # type: bool + keyring=None, # type: str + required_signature_count=None, # type: str + ignore_signature_errors=None, # type: list[str] + require_build_metadata=True, # type: bool + ): # type: (...) -> t.Iterator[ConcreteArtifactsManager] + """Custom ConcreteArtifactsManager constructor with temp dir. 
+ + This method returns a context manager that allocates and cleans + up a temporary directory for caching the collection artifacts + during the dependency resolution process. + """ + # NOTE: Can't use `with tempfile.TemporaryDirectory:` + # NOTE: because it's not in Python 2 stdlib. + temp_path = mkdtemp( + dir=to_bytes(temp_dir_base, errors='surrogate_or_strict'), + ) + b_temp_path = to_bytes(temp_path, errors='surrogate_or_strict') + try: + yield cls( + b_temp_path, + validate_certs, + keyring=keyring, + required_signature_count=required_signature_count, + ignore_signature_errors=ignore_signature_errors + ) + finally: + rmtree(b_temp_path) + + +def parse_scm(collection, version): + """Extract name, version, path and subdir out of the SCM pointer.""" + if ',' in collection: + collection, version = collection.split(',', 1) + elif version == '*' or not version: + version = 'HEAD' + + if collection.startswith('git+'): + path = collection[4:] + else: + path = collection + + path, fragment = urldefrag(path) + fragment = fragment.strip(os.path.sep) + + if path.endswith(os.path.sep + '.git'): + name = path.split(os.path.sep)[-2] + elif '://' not in path and '@' not in path: + name = path + else: + name = path.split('/')[-1] + if name.endswith('.git'): + name = name[:-4] + + return name, version, path, fragment + + +def _extract_collection_from_git(repo_url, coll_ver, b_path): + name, version, git_url, fragment = parse_scm(repo_url, coll_ver) + b_checkout_path = mkdtemp( + dir=b_path, + prefix=to_bytes(name, errors='surrogate_or_strict'), + ) # type: bytes + + try: + git_executable = get_bin_path('git') + except ValueError as err: + raise AnsibleError( + "Could not find git executable to extract the collection from the Git repository `{repo_url!s}`.". + format(repo_url=to_native(git_url)) + ) from err + + # Perform a shallow clone if simply cloning HEAD + if version == 'HEAD': + git_clone_cmd = git_executable, 'clone', '--depth=1', git_url, to_text(b_checkout_path) + else: + git_clone_cmd = git_executable, 'clone', git_url, to_text(b_checkout_path) + # FIXME: '--branch', version + + try: + subprocess.check_call(git_clone_cmd) + except subprocess.CalledProcessError as proc_err: + raise_from( + AnsibleError( # should probably be LookupError + 'Failed to clone a Git repository from `{repo_url!s}`.'. + format(repo_url=to_native(git_url)), + ), + proc_err, + ) + + git_switch_cmd = git_executable, 'checkout', to_text(version) + try: + subprocess.check_call(git_switch_cmd, cwd=b_checkout_path) + except subprocess.CalledProcessError as proc_err: + raise_from( + AnsibleError( # should probably be LookupError + 'Failed to switch a cloned Git repo `{repo_url!s}` ' + 'to the requested revision `{commitish!s}`.'. 
+ format( + commitish=to_native(version), + repo_url=to_native(git_url), + ), + ), + proc_err, + ) + + return ( + os.path.join(b_checkout_path, to_bytes(fragment)) + if fragment else b_checkout_path + ) + + +# FIXME: use random subdirs while preserving the file names +def _download_file(url, b_path, expected_hash, validate_certs, token=None, timeout=60): + # type: (str, bytes, t.Optional[str], bool, GalaxyToken, int) -> bytes + # ^ NOTE: used in download and verify_collections ^ + b_tarball_name = to_bytes( + url.rsplit('/', 1)[1], errors='surrogate_or_strict', + ) + b_file_name = b_tarball_name[:-len('.tar.gz')] + + b_tarball_dir = mkdtemp( + dir=b_path, + prefix=b'-'.join((b_file_name, b'')), + ) # type: bytes + + b_file_path = os.path.join(b_tarball_dir, b_tarball_name) + + display.display("Downloading %s to %s" % (url, to_text(b_tarball_dir))) + # NOTE: Galaxy redirects downloads to S3 which rejects the request + # NOTE: if an Authorization header is attached so don't redirect it + resp = open_url( + to_native(url, errors='surrogate_or_strict'), + validate_certs=validate_certs, + headers=None if token is None else token.headers(), + unredirected_headers=['Authorization'], http_agent=user_agent(), + timeout=timeout + ) + + with open(b_file_path, 'wb') as download_file: # type: t.BinaryIO + actual_hash = _consume_file(resp, write_to=download_file) + + if expected_hash: + display.vvvv( + 'Validating downloaded file hash {actual_hash!s} with ' + 'expected hash {expected_hash!s}'. + format(actual_hash=actual_hash, expected_hash=expected_hash) + ) + if expected_hash != actual_hash: + raise AnsibleError('Mismatch artifact hash with downloaded file') + + return b_file_path + + +def _consume_file(read_from, write_to=None): + # type: (t.BinaryIO, t.BinaryIO) -> str + bufsize = 65536 + sha256_digest = sha256() + data = read_from.read(bufsize) + while data: + if write_to is not None: + write_to.write(data) + write_to.flush() + sha256_digest.update(data) + data = read_from.read(bufsize) + + return sha256_digest.hexdigest() + + +def _normalize_galaxy_yml_manifest( + galaxy_yml, # type: dict[str, t.Union[str, list[str], dict[str, str], None, t.Type[Sentinel]]] + b_galaxy_yml_path, # type: bytes + require_build_metadata=True, # type: bool +): + # type: (...) 
-> dict[str, t.Union[str, list[str], dict[str, str], None, t.Type[Sentinel]]] + galaxy_yml_schema = ( + get_collections_galaxy_meta_info() + ) # type: list[dict[str, t.Any]] # FIXME: <-- + # FIXME: 👆maybe precise type: list[dict[str, t.Union[bool, str, list[str]]]] + + mandatory_keys = set() + string_keys = set() # type: set[str] + list_keys = set() # type: set[str] + dict_keys = set() # type: set[str] + sentinel_keys = set() # type: set[str] + + for info in galaxy_yml_schema: + if info.get('required', False): + mandatory_keys.add(info['key']) + + key_list_type = { + 'str': string_keys, + 'list': list_keys, + 'dict': dict_keys, + 'sentinel': sentinel_keys, + }[info.get('type', 'str')] + key_list_type.add(info['key']) + + all_keys = frozenset(mandatory_keys | string_keys | list_keys | dict_keys | sentinel_keys) + + set_keys = set(galaxy_yml.keys()) + missing_keys = mandatory_keys.difference(set_keys) + if missing_keys: + msg = ( + "The collection galaxy.yml at '%s' is missing the following mandatory keys: %s" + % (to_native(b_galaxy_yml_path), ", ".join(sorted(missing_keys))) + ) + if require_build_metadata: + raise AnsibleError(msg) + display.warning(msg) + raise ValueError(msg) + + extra_keys = set_keys.difference(all_keys) + if len(extra_keys) > 0: + display.warning("Found unknown keys in collection galaxy.yml at '%s': %s" + % (to_text(b_galaxy_yml_path), ", ".join(extra_keys))) + + # Add the defaults if they have not been set + for optional_string in string_keys: + if optional_string not in galaxy_yml: + galaxy_yml[optional_string] = None + + for optional_list in list_keys: + list_val = galaxy_yml.get(optional_list, None) + + if list_val is None: + galaxy_yml[optional_list] = [] + elif not isinstance(list_val, list): + galaxy_yml[optional_list] = [list_val] # type: ignore[list-item] + + for optional_dict in dict_keys: + if optional_dict not in galaxy_yml: + galaxy_yml[optional_dict] = {} + + for optional_sentinel in sentinel_keys: + if optional_sentinel not in galaxy_yml: + galaxy_yml[optional_sentinel] = Sentinel + + # NOTE: `version: null` is only allowed for `galaxy.yml` + # NOTE: and not `MANIFEST.json`. The use-case for it is collections + # NOTE: that generate the version from Git before building a + # NOTE: distributable tarball artifact. + if not galaxy_yml.get('version'): + galaxy_yml['version'] = '*' + + return galaxy_yml + + +def _get_meta_from_dir( + b_path, # type: bytes + require_build_metadata=True, # type: bool +): # type: (...) -> dict[str, t.Union[str, list[str], dict[str, str], None, t.Type[Sentinel]]] + try: + return _get_meta_from_installed_dir(b_path) + except LookupError: + return _get_meta_from_src_dir(b_path, require_build_metadata) + + +def _get_meta_from_src_dir( + b_path, # type: bytes + require_build_metadata=True, # type: bool +): # type: (...) -> dict[str, t.Union[str, list[str], dict[str, str], None, t.Type[Sentinel]]] + galaxy_yml = os.path.join(b_path, _GALAXY_YAML) + if not os.path.isfile(galaxy_yml): + raise LookupError( + "The collection galaxy.yml path '{path!s}' does not exist.". + format(path=to_native(galaxy_yml)) + ) + + with open(galaxy_yml, 'rb') as manifest_file_obj: + try: + manifest = yaml_load(manifest_file_obj) + except yaml.error.YAMLError as yaml_err: + raise_from( + AnsibleError( + "Failed to parse the galaxy.yml at '{path!s}' with " + 'the following error:\n{err_txt!s}'. 
+ format( + path=to_native(galaxy_yml), + err_txt=to_native(yaml_err), + ), + ), + yaml_err, + ) + + if not isinstance(manifest, dict): + if require_build_metadata: + raise AnsibleError(f"The collection galaxy.yml at '{to_native(galaxy_yml)}' is incorrectly formatted.") + # Valid build metadata is not required by ansible-galaxy list. Raise ValueError to fall back to implicit metadata. + display.warning(f"The collection galaxy.yml at '{to_native(galaxy_yml)}' is incorrectly formatted.") + raise ValueError(f"The collection galaxy.yml at '{to_native(galaxy_yml)}' is incorrectly formatted.") + + return _normalize_galaxy_yml_manifest(manifest, galaxy_yml, require_build_metadata) + + +def _get_json_from_installed_dir( + b_path, # type: bytes + filename, # type: str +): # type: (...) -> dict + + b_json_filepath = os.path.join(b_path, to_bytes(filename, errors='surrogate_or_strict')) + + try: + with open(b_json_filepath, 'rb') as manifest_fd: + b_json_text = manifest_fd.read() + except (IOError, OSError): + raise LookupError( + "The collection {manifest!s} path '{path!s}' does not exist.". + format( + manifest=filename, + path=to_native(b_json_filepath), + ) + ) + + manifest_txt = to_text(b_json_text, errors='surrogate_or_strict') + + try: + manifest = json.loads(manifest_txt) + except ValueError: + raise AnsibleError( + 'Collection tar file member {member!s} does not ' + 'contain a valid json string.'. + format(member=filename), + ) + + return manifest + + +def _get_meta_from_installed_dir( + b_path, # type: bytes +): # type: (...) -> dict[str, t.Union[str, list[str], dict[str, str], None, t.Type[Sentinel]]] + manifest = _get_json_from_installed_dir(b_path, MANIFEST_FILENAME) + collection_info = manifest['collection_info'] + + version = collection_info.get('version') + if not version: + raise AnsibleError( + u'Collection metadata file `{manifest_filename!s}` at `{meta_file!s}` is expected ' + u'to have a valid SemVer version value but got {version!s}'. + format( + manifest_filename=MANIFEST_FILENAME, + meta_file=to_text(b_path), + version=to_text(repr(version)), + ), + ) + + return collection_info + + +def _get_meta_from_tar( + b_path, # type: bytes +): # type: (...) -> dict[str, t.Union[str, list[str], dict[str, str], None, t.Type[Sentinel]]] + if not tarfile.is_tarfile(b_path): + raise AnsibleError( + "Collection artifact at '{path!s}' is not a valid tar file.". + format(path=to_native(b_path)), + ) + + with tarfile.open(b_path, mode='r') as collection_tar: # type: tarfile.TarFile + try: + member = collection_tar.getmember(MANIFEST_FILENAME) + except KeyError: + raise AnsibleError( + "Collection at '{path!s}' does not contain the " + 'required file {manifest_file!s}.'. + format( + path=to_native(b_path), + manifest_file=MANIFEST_FILENAME, + ), + ) + + with _tarfile_extract(collection_tar, member) as (_member, member_obj): + if member_obj is None: + raise AnsibleError( + 'Collection tar file does not contain ' + 'member {member!s}'.format(member=MANIFEST_FILENAME), + ) + + text_content = to_text( + member_obj.read(), + errors='surrogate_or_strict', + ) + + try: + manifest = json.loads(text_content) + except ValueError: + raise AnsibleError( + 'Collection tar file member {member!s} does not ' + 'contain a valid json string.'. + format(member=MANIFEST_FILENAME), + ) + return manifest['collection_info'] + + +@contextmanager +def _tarfile_extract( + tar, # type: tarfile.TarFile + member, # type: tarfile.TarInfo +): + # type: (...) 
-> t.Iterator[tuple[tarfile.TarInfo, t.Optional[t.IO[bytes]]]] + tar_obj = tar.extractfile(member) + try: + yield member, tar_obj + finally: + if tar_obj is not None: + tar_obj.close() diff --git a/lib/ansible/galaxy/collection/galaxy_api_proxy.py b/lib/ansible/galaxy/collection/galaxy_api_proxy.py new file mode 100644 index 0000000..51e0c9f --- /dev/null +++ b/lib/ansible/galaxy/collection/galaxy_api_proxy.py @@ -0,0 +1,216 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2020-2021, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +"""A facade for interfacing with multiple Galaxy instances.""" + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import typing as t + +if t.TYPE_CHECKING: + from ansible.galaxy.api import CollectionVersionMetadata + from ansible.galaxy.collection.concrete_artifact_manager import ( + ConcreteArtifactsManager, + ) + from ansible.galaxy.dependency_resolution.dataclasses import ( + Candidate, Requirement, + ) + +from ansible.galaxy.api import GalaxyAPI, GalaxyError +from ansible.module_utils._text import to_text +from ansible.utils.display import Display + + +display = Display() + + +class MultiGalaxyAPIProxy: + """A proxy that abstracts talking to multiple Galaxy instances.""" + + def __init__(self, apis, concrete_artifacts_manager, offline=False): + # type: (t.Iterable[GalaxyAPI], ConcreteArtifactsManager, bool) -> None + """Initialize the target APIs list.""" + self._apis = apis + self._concrete_art_mgr = concrete_artifacts_manager + self._offline = offline # Prevent all GalaxyAPI calls + + @property + def is_offline_mode_requested(self): + return self._offline + + def _assert_that_offline_mode_is_not_requested(self): # type: () -> None + if self.is_offline_mode_requested: + raise NotImplementedError("The calling code is not supposed to be invoked in 'offline' mode.") + + def _get_collection_versions(self, requirement): + # type: (Requirement) -> t.Iterator[tuple[GalaxyAPI, str]] + """Helper for get_collection_versions. + + Yield api, version pairs for all APIs, + and reraise the last error if no valid API was found. + """ + if self._offline: + return [] + + found_api = False + last_error = None # type: Exception | None + + api_lookup_order = ( + (requirement.src, ) + if isinstance(requirement.src, GalaxyAPI) + else self._apis + ) + + for api in api_lookup_order: + try: + versions = api.get_collection_versions(requirement.namespace, requirement.name) + except GalaxyError as api_err: + last_error = api_err + except Exception as unknown_err: + display.warning( + "Skipping Galaxy server {server!s}. " + "Got an unexpected error when getting " + "available versions of collection {fqcn!s}: {err!s}". + format( + server=api.api_server, + fqcn=requirement.fqcn, + err=to_text(unknown_err), + ) + ) + last_error = unknown_err + else: + found_api = True + for version in versions: + yield api, version + + if not found_api and last_error is not None: + raise last_error + + def get_collection_versions(self, requirement): + # type: (Requirement) -> t.Iterable[tuple[str, GalaxyAPI]] + """Get a set of unique versions for FQCN on Galaxy servers.""" + if requirement.is_concrete_artifact: + return { + ( + self._concrete_art_mgr. 
+ get_direct_collection_version(requirement), + requirement.src, + ), + } + + api_lookup_order = ( + (requirement.src, ) + if isinstance(requirement.src, GalaxyAPI) + else self._apis + ) + return set( + (version, api) + for api, version in self._get_collection_versions( + requirement, + ) + ) + + def get_collection_version_metadata(self, collection_candidate): + # type: (Candidate) -> CollectionVersionMetadata + """Retrieve collection metadata of a given candidate.""" + self._assert_that_offline_mode_is_not_requested() + + api_lookup_order = ( + (collection_candidate.src, ) + if isinstance(collection_candidate.src, GalaxyAPI) + else self._apis + ) + + last_err: t.Optional[Exception] + + for api in api_lookup_order: + try: + version_metadata = api.get_collection_version_metadata( + collection_candidate.namespace, + collection_candidate.name, + collection_candidate.ver, + ) + except GalaxyError as api_err: + last_err = api_err + except Exception as unknown_err: + # `verify` doesn't use `get_collection_versions` since the version is already known. + # Do the same as `install` and `download` by trying all APIs before failing. + # Warn for debugging purposes, since the Galaxy server may be unexpectedly down. + last_err = unknown_err + display.warning( + "Skipping Galaxy server {server!s}. " + "Got an unexpected error when getting " + "available versions of collection {fqcn!s}: {err!s}". + format( + server=api.api_server, + fqcn=collection_candidate.fqcn, + err=to_text(unknown_err), + ) + ) + else: + self._concrete_art_mgr.save_collection_source( + collection_candidate, + version_metadata.download_url, + version_metadata.artifact_sha256, + api.token, + version_metadata.signatures_url, + version_metadata.signatures, + ) + return version_metadata + + raise last_err + + def get_collection_dependencies(self, collection_candidate): + # type: (Candidate) -> dict[str, str] + # FIXME: return Requirement instances instead? + """Retrieve collection dependencies of a given candidate.""" + if collection_candidate.is_concrete_artifact: + return ( + self. + _concrete_art_mgr. + get_direct_collection_dependencies + )(collection_candidate) + + return ( + self. + get_collection_version_metadata(collection_candidate). + dependencies + ) + + def get_signatures(self, collection_candidate): + # type: (Candidate) -> list[str] + self._assert_that_offline_mode_is_not_requested() + namespace = collection_candidate.namespace + name = collection_candidate.name + version = collection_candidate.ver + last_err = None # type: Exception | None + + api_lookup_order = ( + (collection_candidate.src, ) + if isinstance(collection_candidate.src, GalaxyAPI) + else self._apis + ) + + for api in api_lookup_order: + try: + return api.get_collection_signatures(namespace, name, version) + except GalaxyError as api_err: + last_err = api_err + except Exception as unknown_err: + # Warn for debugging purposes, since the Galaxy server may be unexpectedly down. + last_err = unknown_err + display.warning( + "Skipping Galaxy server {server!s}. " + "Got an unexpected error when getting " + "available versions of collection {fqcn!s}: {err!s}". 
+ format( + server=api.api_server, + fqcn=collection_candidate.fqcn, + err=to_text(unknown_err), + ) + ) + if last_err: + raise last_err + + return [] diff --git a/lib/ansible/galaxy/collection/gpg.py b/lib/ansible/galaxy/collection/gpg.py new file mode 100644 index 0000000..8641f0d --- /dev/null +++ b/lib/ansible/galaxy/collection/gpg.py @@ -0,0 +1,282 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2022, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +"""Signature verification helpers.""" + +from ansible.errors import AnsibleError +from ansible.galaxy.user_agent import user_agent +from ansible.module_utils.urls import open_url + +import contextlib +import os +import subprocess +import sys +import typing as t + +from dataclasses import dataclass, fields as dc_fields +from functools import partial +from urllib.error import HTTPError, URLError + +if t.TYPE_CHECKING: + from ansible.utils.display import Display + +IS_PY310_PLUS = sys.version_info[:2] >= (3, 10) + +frozen_dataclass = partial(dataclass, frozen=True, **({'slots': True} if IS_PY310_PLUS else {})) + + +def get_signature_from_source(source, display=None): # type: (str, t.Optional[Display]) -> str + if display is not None: + display.vvvv(f"Using signature at {source}") + try: + with open_url( + source, + http_agent=user_agent(), + validate_certs=True, + follow_redirects='safe' + ) as resp: + signature = resp.read() + except (HTTPError, URLError) as e: + raise AnsibleError( + f"Failed to get signature for collection verification from '{source}': {e}" + ) from e + + return signature + + +def run_gpg_verify( + manifest_file, # type: str + signature, # type: str + keyring, # type: str + display, # type: Display +): # type: (...) -> tuple[str, int] + status_fd_read, status_fd_write = os.pipe() + + # running the gpg command will create the keyring if it does not exist + remove_keybox = not os.path.exists(keyring) + + cmd = [ + 'gpg', + f'--status-fd={status_fd_write}', + '--verify', + '--batch', + '--no-tty', + '--no-default-keyring', + f'--keyring={keyring}', + '-', + manifest_file, + ] + cmd_str = ' '.join(cmd) + display.vvvv(f"Running command '{cmd}'") + + try: + p = subprocess.Popen( + cmd, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + pass_fds=(status_fd_write,), + encoding='utf8', + ) + except (FileNotFoundError, subprocess.SubprocessError) as err: + raise AnsibleError( + f"Failed during GnuPG verification with command '{cmd_str}': {err}" + ) from err + else: + stdout, stderr = p.communicate(input=signature) + finally: + os.close(status_fd_write) + + if remove_keybox: + with contextlib.suppress(OSError): + os.remove(keyring) + + with os.fdopen(status_fd_read) as f: + stdout = f.read() + display.vvvv( + f"stdout: \n{stdout}\nstderr: \n{stderr}\n(exit code {p.returncode})" + ) + return stdout, p.returncode + + +def parse_gpg_errors(status_out): # type: (str) -> t.Iterator[GpgBaseError] + for line in status_out.splitlines(): + if not line: + continue + try: + _dummy, status, remainder = line.split(maxsplit=2) + except ValueError: + _dummy, status = line.split(maxsplit=1) + remainder = None + + try: + cls = GPG_ERROR_MAP[status] + except KeyError: + continue + + fields = [status] + if remainder: + fields.extend( + remainder.split( + None, + len(dc_fields(cls)) - 2 + ) + ) + + yield cls(*fields) + + +@frozen_dataclass +class GpgBaseError(Exception): + status: str + + @classmethod + def get_gpg_error_description(cls) -> str: + """Return the 
current class description.""" + return ' '.join(cls.__doc__.split()) + + def __post_init__(self): + for field in dc_fields(self): + super(GpgBaseError, self).__setattr__(field.name, field.type(getattr(self, field.name))) + + +@frozen_dataclass +class GpgExpSig(GpgBaseError): + """The signature with the keyid is good, but the signature is expired.""" + keyid: str + username: str + + +@frozen_dataclass +class GpgExpKeySig(GpgBaseError): + """The signature with the keyid is good, but the signature was made by an expired key.""" + keyid: str + username: str + + +@frozen_dataclass +class GpgRevKeySig(GpgBaseError): + """The signature with the keyid is good, but the signature was made by a revoked key.""" + keyid: str + username: str + + +@frozen_dataclass +class GpgBadSig(GpgBaseError): + """The signature with the keyid has not been verified okay.""" + keyid: str + username: str + + +@frozen_dataclass +class GpgErrSig(GpgBaseError): + """"It was not possible to check the signature. This may be caused by + a missing public key or an unsupported algorithm. A RC of 4 + indicates unknown algorithm, a 9 indicates a missing public + key. + """ + keyid: str + pkalgo: int + hashalgo: int + sig_class: str + time: int + rc: int + fpr: str + + +@frozen_dataclass +class GpgNoPubkey(GpgBaseError): + """The public key is not available.""" + keyid: str + + +@frozen_dataclass +class GpgMissingPassPhrase(GpgBaseError): + """No passphrase was supplied.""" + + +@frozen_dataclass +class GpgBadPassphrase(GpgBaseError): + """The supplied passphrase was wrong or not given.""" + keyid: str + + +@frozen_dataclass +class GpgNoData(GpgBaseError): + """No data has been found. Codes for WHAT are: + - 1 :: No armored data. + - 2 :: Expected a packet but did not find one. + - 3 :: Invalid packet found, this may indicate a non OpenPGP + message. + - 4 :: Signature expected but not found. + """ + what: str + + +@frozen_dataclass +class GpgUnexpected(GpgBaseError): + """No data has been found. Codes for WHAT are: + - 1 :: No armored data. + - 2 :: Expected a packet but did not find one. + - 3 :: Invalid packet found, this may indicate a non OpenPGP + message. + - 4 :: Signature expected but not found. 
+ """ + what: str + + +@frozen_dataclass +class GpgError(GpgBaseError): + """This is a generic error status message, it might be followed by error location specific data.""" + location: str + code: int + more: str = "" + + +@frozen_dataclass +class GpgFailure(GpgBaseError): + """This is the counterpart to SUCCESS and used to indicate a program failure.""" + location: str + code: int + + +@frozen_dataclass +class GpgBadArmor(GpgBaseError): + """The ASCII armor is corrupted.""" + + +@frozen_dataclass +class GpgKeyExpired(GpgBaseError): + """The key has expired.""" + timestamp: int + + +@frozen_dataclass +class GpgKeyRevoked(GpgBaseError): + """The used key has been revoked by its owner.""" + + +@frozen_dataclass +class GpgNoSecKey(GpgBaseError): + """The secret key is not available.""" + keyid: str + + +GPG_ERROR_MAP = { + 'EXPSIG': GpgExpSig, + 'EXPKEYSIG': GpgExpKeySig, + 'REVKEYSIG': GpgRevKeySig, + 'BADSIG': GpgBadSig, + 'ERRSIG': GpgErrSig, + 'NO_PUBKEY': GpgNoPubkey, + 'MISSING_PASSPHRASE': GpgMissingPassPhrase, + 'BAD_PASSPHRASE': GpgBadPassphrase, + 'NODATA': GpgNoData, + 'UNEXPECTED': GpgUnexpected, + 'ERROR': GpgError, + 'FAILURE': GpgFailure, + 'BADARMOR': GpgBadArmor, + 'KEYEXPIRED': GpgKeyExpired, + 'KEYREVOKED': GpgKeyRevoked, + 'NO_SECKEY': GpgNoSecKey, +} diff --git a/lib/ansible/galaxy/data/apb/Dockerfile.j2 b/lib/ansible/galaxy/data/apb/Dockerfile.j2 new file mode 100644 index 0000000..4d99a8b --- /dev/null +++ b/lib/ansible/galaxy/data/apb/Dockerfile.j2 @@ -0,0 +1,9 @@ +FROM ansibleplaybookbundle/apb-base + +LABEL "com.redhat.apb.spec"=\ +"" + +COPY playbooks /opt/apb/actions +COPY . /opt/ansible/roles/{{ role_name }} +RUN chmod -R g=u /opt/{ansible,apb} +USER apb diff --git a/lib/ansible/galaxy/data/apb/Makefile.j2 b/lib/ansible/galaxy/data/apb/Makefile.j2 new file mode 100644 index 0000000..ebeaa61 --- /dev/null +++ b/lib/ansible/galaxy/data/apb/Makefile.j2 @@ -0,0 +1,21 @@ +DOCKERHOST = DOCKERHOST +DOCKERORG = DOCKERORG +IMAGENAME = {{ role_name }} +TAG = latest +USER=$(shell id -u) +PWD=$(shell pwd) +build_and_push: apb_build docker_push apb_push + +.PHONY: apb_build +apb_build: + docker run --rm --privileged -v $(PWD):/mnt:z -v $(HOME)/.kube:/.kube -v /var/run/docker.sock:/var/run/docker.sock -u $(USER) docker.io/ansibleplaybookbundle/apb-tools:latest prepare + docker build -t $(DOCKERHOST)/$(DOCKERORG)/$(IMAGENAME):$(TAG) . + +.PHONY: docker_push +docker_push: + docker push $(DOCKERHOST)/$(DOCKERORG)/$(IMAGENAME):$(TAG) + +.PHONY: apb_push +apb_push: + docker run --rm --privileged -v $(PWD):/mnt:z -v $(HOME)/.kube:/.kube -v /var/run/docker.sock:/var/run/docker.sock -u $(USER) docker.io/ansibleplaybookbundle/apb-tools:latest push + diff --git a/lib/ansible/galaxy/data/apb/README.md b/lib/ansible/galaxy/data/apb/README.md new file mode 100644 index 0000000..2e350a0 --- /dev/null +++ b/lib/ansible/galaxy/data/apb/README.md @@ -0,0 +1,38 @@ +APB Name +========= + +A brief description of the APB goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +APB Variables +-------------- + +A description of the settable variables for this APB should go here, including any variables that are in defaults/main.yml, vars/main.yml, apb.yml, and any variables that can/should be set via parameters to the role. 
Any variables that are read from other roles and/or the global scope (i.e. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other APBs/roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your APB (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). diff --git a/lib/ansible/galaxy/data/apb/apb.yml.j2 b/lib/ansible/galaxy/data/apb/apb.yml.j2 new file mode 100644 index 0000000..f968801 --- /dev/null +++ b/lib/ansible/galaxy/data/apb/apb.yml.j2 @@ -0,0 +1,13 @@ +version: '1.0.0' +name: {{ role_name }} +description: {{ description }} +bindable: False +async: optional +metadata: + displayName: {{ role_name }} +plans: + - name: default + description: This default plan deploys {{ role_name }} + free: True + metadata: {} + parameters: [] diff --git a/lib/ansible/galaxy/data/apb/defaults/main.yml.j2 b/lib/ansible/galaxy/data/apb/defaults/main.yml.j2 new file mode 100644 index 0000000..3818e64 --- /dev/null +++ b/lib/ansible/galaxy/data/apb/defaults/main.yml.j2 @@ -0,0 +1,2 @@ +--- +# defaults file for {{ role_name }} diff --git a/lib/ansible/galaxy/data/apb/files/.git_keep b/lib/ansible/galaxy/data/apb/files/.git_keep new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/lib/ansible/galaxy/data/apb/files/.git_keep diff --git a/lib/ansible/galaxy/data/apb/handlers/main.yml.j2 b/lib/ansible/galaxy/data/apb/handlers/main.yml.j2 new file mode 100644 index 0000000..3f4c496 --- /dev/null +++ b/lib/ansible/galaxy/data/apb/handlers/main.yml.j2 @@ -0,0 +1,2 @@ +--- +# handlers file for {{ role_name }} diff --git a/lib/ansible/galaxy/data/apb/meta/main.yml.j2 b/lib/ansible/galaxy/data/apb/meta/main.yml.j2 new file mode 100644 index 0000000..862f8ef --- /dev/null +++ b/lib/ansible/galaxy/data/apb/meta/main.yml.j2 @@ -0,0 +1,44 @@ +galaxy_info: + author: {{ author }} + description: {{ description }} + company: {{ company }} + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: {{ issue_tracker_url }} + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: {{ license }} + + # + # platforms is a list of platforms, and each platform has a name and a list of versions. + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: + - apb + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
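Editor's note: the `parse_gpg_errors` helper in `lib/ansible/galaxy/collection/gpg.py` above maps each `[GNUPG:]` status line onto one of the frozen dataclasses keyed in `GPG_ERROR_MAP`. A minimal sketch of that round trip, using a made-up key id and user id (the status-line shape is GnuPG's documented `--status-fd` output; nothing here is part of the patch itself):

```python
# Sketch only: feed one fabricated GnuPG status line through the parser
# added in gpg.py above and inspect the resulting dataclass.
from ansible.galaxy.collection.gpg import parse_gpg_errors

status_out = "[GNUPG:] BADSIG 0123456789ABCDEF Example User <user@example.com>"
for err in parse_gpg_errors(status_out):
    # BADSIG maps to GpgBadSig; the class docstring doubles as the description.
    print(type(err).__name__, '->', err.get_gpg_error_description())
    print('keyid:', err.keyid, 'username:', err.username)
```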
diff --git a/lib/ansible/galaxy/data/apb/playbooks/deprovision.yml.j2 b/lib/ansible/galaxy/data/apb/playbooks/deprovision.yml.j2 new file mode 100644 index 0000000..1952731 --- /dev/null +++ b/lib/ansible/galaxy/data/apb/playbooks/deprovision.yml.j2 @@ -0,0 +1,8 @@ +- name: "{{ role_name }} playbook to deprovision the application" + hosts: localhost + gather_facts: false + connection: local + vars: + apb_action: deprovision + roles: + - role: {{ role_name }} diff --git a/lib/ansible/galaxy/data/apb/playbooks/provision.yml.j2 b/lib/ansible/galaxy/data/apb/playbooks/provision.yml.j2 new file mode 100644 index 0000000..7b08605 --- /dev/null +++ b/lib/ansible/galaxy/data/apb/playbooks/provision.yml.j2 @@ -0,0 +1,8 @@ +- name: "{{ role_name }} playbook to provision the application" + hosts: localhost + gather_facts: false + connection: local + vars: + apb_action: provision + roles: + - role: {{ role_name }} diff --git a/lib/ansible/galaxy/data/apb/tasks/main.yml.j2 b/lib/ansible/galaxy/data/apb/tasks/main.yml.j2 new file mode 100644 index 0000000..a988065 --- /dev/null +++ b/lib/ansible/galaxy/data/apb/tasks/main.yml.j2 @@ -0,0 +1,2 @@ +--- +# tasks file for {{ role_name }} diff --git a/lib/ansible/galaxy/data/apb/templates/.git_keep b/lib/ansible/galaxy/data/apb/templates/.git_keep new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/lib/ansible/galaxy/data/apb/templates/.git_keep diff --git a/lib/ansible/galaxy/data/apb/tests/ansible.cfg b/lib/ansible/galaxy/data/apb/tests/ansible.cfg new file mode 100644 index 0000000..2f74f1b --- /dev/null +++ b/lib/ansible/galaxy/data/apb/tests/ansible.cfg @@ -0,0 +1,2 @@ +[defaults] +inventory=./inventory diff --git a/lib/ansible/galaxy/data/apb/tests/inventory b/lib/ansible/galaxy/data/apb/tests/inventory new file mode 100644 index 0000000..ea69cbf --- /dev/null +++ b/lib/ansible/galaxy/data/apb/tests/inventory @@ -0,0 +1,3 @@ +localhost + + diff --git a/lib/ansible/galaxy/data/apb/tests/test.yml.j2 b/lib/ansible/galaxy/data/apb/tests/test.yml.j2 new file mode 100644 index 0000000..fb14f85 --- /dev/null +++ b/lib/ansible/galaxy/data/apb/tests/test.yml.j2 @@ -0,0 +1,7 @@ +--- +- hosts: localhost + gather_facts: no + connection: local + tasks: + + # Add tasks and assertions for testing the service here. diff --git a/lib/ansible/galaxy/data/apb/vars/main.yml.j2 b/lib/ansible/galaxy/data/apb/vars/main.yml.j2 new file mode 100644 index 0000000..092d511 --- /dev/null +++ b/lib/ansible/galaxy/data/apb/vars/main.yml.j2 @@ -0,0 +1,2 @@ +--- +# vars file for {{ role_name }} diff --git a/lib/ansible/galaxy/data/collections_galaxy_meta.yml b/lib/ansible/galaxy/data/collections_galaxy_meta.yml new file mode 100644 index 0000000..5c4472c --- /dev/null +++ b/lib/ansible/galaxy/data/collections_galaxy_meta.yml @@ -0,0 +1,120 @@ +# Copyright (c) 2019 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# key: The name of the key as defined in galaxy.yml +# description: Comment/info on the key to be used as the generated doc and auto generated skeleton galaxy.yml file +# required: Whether the key is required (default is no) +# type: The type of value that can be set, aligns to the values in the plugin formatter +--- +- key: namespace + description: + - The namespace of the collection. + - This can be a company/brand/organization or product namespace under which all content lives. + - May only contain alphanumeric lowercase characters and underscores. 
Namespaces cannot start with underscores or + numbers and cannot contain consecutive underscores. + required: yes + type: str + +- key: name + description: + - The name of the collection. + - Has the same character restrictions as C(namespace). + required: yes + type: str + +- key: version + description: + - The version of the collection. + - Must be compatible with semantic versioning. + required: yes + type: str + +- key: readme + description: + - The path to the Markdown (.md) readme file. + - This path is relative to the root of the collection. + required: yes + type: str + +- key: authors + description: + - A list of the collection's content authors. + - Can be just the name or in the format 'Full Name <email> (url) @nicks:irc/im.site#channel'. + required: yes + type: list + +- key: description + description: + - A short summary description of the collection. + type: str + +- key: license + description: + - Either a single license or a list of licenses for content inside of a collection. + - Ansible Galaxy currently only accepts L(SPDX,https://spdx.org/licenses/) licenses + - This key is mutually exclusive with C(license_file). + type: list + +- key: license_file + description: + - The path to the license file for the collection. + - This path is relative to the root of the collection. + - This key is mutually exclusive with C(license). + type: str + +- key: tags + description: + - A list of tags you want to associate with the collection for indexing/searching. + - A tag name has the same character requirements as C(namespace) and C(name). + type: list + +- key: dependencies + description: + - Collections that this collection requires to be installed for it to be usable. + - The key of the dict is the collection label C(namespace.name). + - The value is a version range + L(specifiers,https://python-semanticversion.readthedocs.io/en/latest/#requirement-specification). + - Multiple version range specifiers can be set and are separated by C(,). + type: dict + +- key: repository + description: + - The URL of the originating SCM repository. + type: str + +- key: documentation + description: + - The URL to any online docs. + type: str + +- key: homepage + description: + - The URL to the homepage of the collection/project. + type: str + +- key: issues + description: + - The URL to the collection issue tracker. + type: str + +- key: build_ignore + description: + - A list of file glob-like patterns used to filter any files or directories + that should not be included in the build artifact. + - A pattern is matched from the relative path of the file or directory of the + collection directory. + - This uses C(fnmatch) to match the files or directories. + - Some directories and files like C(galaxy.yml), C(*.pyc), C(*.retry), and + C(.git) are always filtered. + - Mutually exclusive with C(manifest) + type: list + version_added: '2.10' + +- key: manifest + description: + - A dict controlling use of manifest directives used in building the collection artifact. 
+ - The key C(directives) is a list of MANIFEST.in style L(directives,https://packaging.python.org/en/latest/guides/using-manifest-in/#manifest-in-commands) + - The key C(omit_default_directives) is a boolean that controls whether the default directives are used + - Mutually exclusive with C(build_ignore) + type: sentinel + version_added: '2.14' diff --git a/lib/ansible/galaxy/data/container/README.md b/lib/ansible/galaxy/data/container/README.md new file mode 100644 index 0000000..1b66bdb --- /dev/null +++ b/lib/ansible/galaxy/data/container/README.md @@ -0,0 +1,49 @@ +# Role Name + +Adds a <SERVICE_NAME> service to your [Ansible Container](https://github.com/ansible/ansible-container) project. Run the following commands +to install the service: + +``` +# Set the working directory to your Ansible Container project root +$ cd myproject + +# Install the service +$ ansible-container install <USERNAME.ROLE_NAME> +``` + +## Requirements + +- [Ansible Container](https://github.com/ansible/ansible-container) +- An existing Ansible Container project. To create a project, simply run the following: + ``` + # Create an empty project directory + $ mkdir myproject + + # Set the working directory to the new directory + $ cd myproject + + # Initialize the project + $ ansible-container init + ``` + +- Continue listing any prerequisites here... + + +## Role Variables + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set +via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +## Dependencies + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +## License + +BSD + +## Author Information + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). + + diff --git a/lib/ansible/galaxy/data/container/defaults/main.yml.j2 b/lib/ansible/galaxy/data/container/defaults/main.yml.j2 new file mode 100644 index 0000000..3818e64 --- /dev/null +++ b/lib/ansible/galaxy/data/container/defaults/main.yml.j2 @@ -0,0 +1,2 @@ +--- +# defaults file for {{ role_name }} diff --git a/lib/ansible/galaxy/data/container/files/.git_keep b/lib/ansible/galaxy/data/container/files/.git_keep new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/lib/ansible/galaxy/data/container/files/.git_keep diff --git a/lib/ansible/galaxy/data/container/handlers/main.yml.j2 b/lib/ansible/galaxy/data/container/handlers/main.yml.j2 new file mode 100644 index 0000000..3f4c496 --- /dev/null +++ b/lib/ansible/galaxy/data/container/handlers/main.yml.j2 @@ -0,0 +1,2 @@ +--- +# handlers file for {{ role_name }} diff --git a/lib/ansible/galaxy/data/container/meta/container.yml.j2 b/lib/ansible/galaxy/data/container/meta/container.yml.j2 new file mode 100644 index 0000000..f033d34 --- /dev/null +++ b/lib/ansible/galaxy/data/container/meta/container.yml.j2 @@ -0,0 +1,11 @@ +# Add your Ansible Container service definitions here. 
+# For example: + # + # web: + # image: ubuntu:trusty + # ports: + # - "80:80" + # command: ['/usr/bin/dumb-init', '/usr/sbin/apache2ctl', '-D', 'FOREGROUND'] + # dev_overrides: + # environment: + # - "DEBUG=1" diff --git a/lib/ansible/galaxy/data/container/meta/main.yml.j2 b/lib/ansible/galaxy/data/container/meta/main.yml.j2 new file mode 100644 index 0000000..72fc9a2 --- /dev/null +++ b/lib/ansible/galaxy/data/container/meta/main.yml.j2 @@ -0,0 +1,52 @@ +galaxy_info: + author: {{ author }} + description: {{ description }} + company: {{ company }} + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: {{ issue_tracker_url }} + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: {{ license }} + + min_ansible_container_version: 0.2.0 + + # If Ansible is required outside of the build container, provide the minimum version: + # min_ansible_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: + - container + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. diff --git a/lib/ansible/galaxy/data/container/tasks/main.yml.j2 b/lib/ansible/galaxy/data/container/tasks/main.yml.j2 new file mode 100644 index 0000000..a988065 --- /dev/null +++ b/lib/ansible/galaxy/data/container/tasks/main.yml.j2 @@ -0,0 +1,2 @@ +--- +# tasks file for {{ role_name }} diff --git a/lib/ansible/galaxy/data/container/templates/.git_keep b/lib/ansible/galaxy/data/container/templates/.git_keep new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/lib/ansible/galaxy/data/container/templates/.git_keep diff --git a/lib/ansible/galaxy/data/container/tests/ansible.cfg b/lib/ansible/galaxy/data/container/tests/ansible.cfg new file mode 100644 index 0000000..2f74f1b --- /dev/null +++ b/lib/ansible/galaxy/data/container/tests/ansible.cfg @@ -0,0 +1,2 @@ +[defaults] +inventory=./inventory diff --git a/lib/ansible/galaxy/data/container/tests/inventory b/lib/ansible/galaxy/data/container/tests/inventory new file mode 100644 index 0000000..ea69cbf --- /dev/null +++ b/lib/ansible/galaxy/data/container/tests/inventory @@ -0,0 +1,3 @@ +localhost + + diff --git a/lib/ansible/galaxy/data/container/tests/test.yml.j2 b/lib/ansible/galaxy/data/container/tests/test.yml.j2 new file mode 100644 index 0000000..fb14f85 --- /dev/null +++ b/lib/ansible/galaxy/data/container/tests/test.yml.j2 @@ -0,0 +1,7 @@ +--- +- hosts: localhost + gather_facts: no + connection: local + tasks: + + # Add tasks and assertions for testing the service here. 
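Editor's note: the schema in `collections_galaxy_meta.yml` above is what `_normalize_galaxy_yml_manifest` (earlier in this diff) consumes to fill in defaults: optional `str` keys become `None`, `list` keys become `[]` (bare strings are coerced into one-element lists), `dict` keys become `{}`, `sentinel` keys get `Sentinel`, and an empty `version` collapses to the `*` wildcard. A rough sketch, assuming the helper lives in `ansible.galaxy.collection.concrete_artifact_manager` as the surrounding hunks suggest; all values are invented:

```python
# Sketch: normalize a minimal galaxy.yml dict and inspect the defaults.
from ansible.galaxy.collection.concrete_artifact_manager import (
    _normalize_galaxy_yml_manifest,
)

manifest = {
    'namespace': 'my_ns',
    'name': 'my_collection',
    'version': None,               # legal in galaxy.yml, never in MANIFEST.json
    'readme': 'README.md',
    'authors': ['Jane Doe <jane@example.com>'],
    'license': 'GPL-3.0-only',     # bare string, coerced to a one-item list
}
normalized = _normalize_galaxy_yml_manifest(manifest, b'/tmp/galaxy.yml')
print(normalized['version'])       # '*'
print(normalized['license'])       # ['GPL-3.0-only']
print(normalized['tags'], normalized['dependencies'])  # [] {}
```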
diff --git a/lib/ansible/galaxy/data/container/vars/main.yml.j2 b/lib/ansible/galaxy/data/container/vars/main.yml.j2 new file mode 100644 index 0000000..092d511 --- /dev/null +++ b/lib/ansible/galaxy/data/container/vars/main.yml.j2 @@ -0,0 +1,2 @@ +--- +# vars file for {{ role_name }} diff --git a/lib/ansible/galaxy/data/default/collection/README.md.j2 b/lib/ansible/galaxy/data/default/collection/README.md.j2 new file mode 100644 index 0000000..5e51622 --- /dev/null +++ b/lib/ansible/galaxy/data/default/collection/README.md.j2 @@ -0,0 +1,3 @@ +# Ansible Collection - {{ namespace }}.{{ collection_name }} + +Documentation for the collection. diff --git a/lib/ansible/galaxy/data/default/collection/docs/.git_keep b/lib/ansible/galaxy/data/default/collection/docs/.git_keep new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/lib/ansible/galaxy/data/default/collection/docs/.git_keep diff --git a/lib/ansible/galaxy/data/default/collection/galaxy.yml.j2 b/lib/ansible/galaxy/data/default/collection/galaxy.yml.j2 new file mode 100644 index 0000000..7821491 --- /dev/null +++ b/lib/ansible/galaxy/data/default/collection/galaxy.yml.j2 @@ -0,0 +1,16 @@ +### REQUIRED +{% for option in required_config %} +{{ option.description | comment_ify }} +{{ {option.key: option.value} | to_nice_yaml }} +{% endfor %} + +### OPTIONAL but strongly recommended +{% for option in optional_config %} +{{ option.description | comment_ify }} +{% if option.key == 'manifest' %} +{{ {option.key: option.value} | to_nice_yaml | comment_ify }} + +{% else %} +{{ {option.key: option.value} | to_nice_yaml }} +{% endif %} +{% endfor %} diff --git a/lib/ansible/galaxy/data/default/collection/meta/runtime.yml b/lib/ansible/galaxy/data/default/collection/meta/runtime.yml new file mode 100644 index 0000000..20f709e --- /dev/null +++ b/lib/ansible/galaxy/data/default/collection/meta/runtime.yml @@ -0,0 +1,52 @@ +--- +# Collections must specify a minimum required ansible version to upload +# to galaxy +# requires_ansible: '>=2.9.10' + +# Content that Ansible needs to load from another location or that has +# been deprecated/removed +# plugin_routing: +# action: +# redirected_plugin_name: +# redirect: ns.col.new_location +# deprecated_plugin_name: +# deprecation: +# removal_version: "4.0.0" +# warning_text: | +# See the porting guide on how to update your playbook to +# use ns.col.another_plugin instead. +# removed_plugin_name: +# tombstone: +# removal_version: "2.0.0" +# warning_text: | +# See the porting guide on how to update your playbook to +# use ns.col.another_plugin instead. 
+# become: +# cache: +# callback: +# cliconf: +# connection: +# doc_fragments: +# filter: +# httpapi: +# inventory: +# lookup: +# module_utils: +# modules: +# netconf: +# shell: +# strategy: +# terminal: +# test: +# vars: + +# Python import statements that Ansible needs to load from another location +# import_redirection: +# ansible_collections.ns.col.plugins.module_utils.old_location: +# redirect: ansible_collections.ns.col.plugins.module_utils.new_location + +# Groups of actions/modules that take a common set of options +# action_groups: +# group_name: +# - module1 +# - module2 diff --git a/lib/ansible/galaxy/data/default/collection/plugins/README.md.j2 b/lib/ansible/galaxy/data/default/collection/plugins/README.md.j2 new file mode 100644 index 0000000..7c006cf --- /dev/null +++ b/lib/ansible/galaxy/data/default/collection/plugins/README.md.j2 @@ -0,0 +1,31 @@ +# Collections Plugins Directory + +This directory can be used to ship various plugins inside an Ansible collection. Each plugin is placed in a folder that +is named after the type of plugin it is in. It can also include the `module_utils` and `modules` directory that +would contain module utils and modules respectively. + +Here is an example directory of the majority of plugins currently supported by Ansible: + +``` +└── plugins + ├── action + ├── become + ├── cache + ├── callback + ├── cliconf + ├── connection + ├── filter + ├── httpapi + ├── inventory + ├── lookup + ├── module_utils + ├── modules + ├── netconf + ├── shell + ├── strategy + ├── terminal + ├── test + └── vars +``` + +A full list of plugin types can be found at [Working With Plugins]({{ ansible_plugin_list_dir }}). diff --git a/lib/ansible/galaxy/data/default/collection/roles/.git_keep b/lib/ansible/galaxy/data/default/collection/roles/.git_keep new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/lib/ansible/galaxy/data/default/collection/roles/.git_keep diff --git a/lib/ansible/galaxy/data/default/role/README.md b/lib/ansible/galaxy/data/default/role/README.md new file mode 100644 index 0000000..225dd44 --- /dev/null +++ b/lib/ansible/galaxy/data/default/role/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
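Editor's note: the fully commented `meta/runtime.yml` skeleton above only hints at how a filled-in file behaves once loaded. A small sketch with hypothetical collection and plugin names; per the template's own comments, only `requires_ansible` is required for a Galaxy upload:

```python
# Sketch: parse a hypothetical, uncommented runtime.yml with Ansible's
# bundled YAML loader, the same one used elsewhere in this diff.
from ansible.module_utils.common.yaml import yaml_load

runtime_yml = """
requires_ansible: '>=2.9.10'
plugin_routing:
  modules:
    old_module:
      redirect: my_ns.my_collection.new_module
"""

meta = yaml_load(runtime_yml)
print(meta['requires_ansible'])
print(meta['plugin_routing']['modules']['old_module']['redirect'])
```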
diff --git a/lib/ansible/galaxy/data/default/role/defaults/main.yml.j2 b/lib/ansible/galaxy/data/default/role/defaults/main.yml.j2 new file mode 100644 index 0000000..3818e64 --- /dev/null +++ b/lib/ansible/galaxy/data/default/role/defaults/main.yml.j2 @@ -0,0 +1,2 @@ +--- +# defaults file for {{ role_name }} diff --git a/lib/ansible/galaxy/data/default/role/files/.git_keep b/lib/ansible/galaxy/data/default/role/files/.git_keep new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/lib/ansible/galaxy/data/default/role/files/.git_keep diff --git a/lib/ansible/galaxy/data/default/role/handlers/main.yml.j2 b/lib/ansible/galaxy/data/default/role/handlers/main.yml.j2 new file mode 100644 index 0000000..3f4c496 --- /dev/null +++ b/lib/ansible/galaxy/data/default/role/handlers/main.yml.j2 @@ -0,0 +1,2 @@ +--- +# handlers file for {{ role_name }} diff --git a/lib/ansible/galaxy/data/default/role/meta/main.yml.j2 b/lib/ansible/galaxy/data/default/role/meta/main.yml.j2 new file mode 100644 index 0000000..4891a68 --- /dev/null +++ b/lib/ansible/galaxy/data/default/role/meta/main.yml.j2 @@ -0,0 +1,55 @@ +galaxy_info: + author: {{ author }} + description: {{ description }} + company: {{ company }} + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: {{ issue_tracker_url }} + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: {{ license }} + + min_ansible_version: {{ min_ansible_version }} + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
+{% for dependency in dependencies %} + #- {{ dependency }} +{%- endfor %} diff --git a/lib/ansible/galaxy/data/default/role/tasks/main.yml.j2 b/lib/ansible/galaxy/data/default/role/tasks/main.yml.j2 new file mode 100644 index 0000000..a988065 --- /dev/null +++ b/lib/ansible/galaxy/data/default/role/tasks/main.yml.j2 @@ -0,0 +1,2 @@ +--- +# tasks file for {{ role_name }} diff --git a/lib/ansible/galaxy/data/default/role/templates/.git_keep b/lib/ansible/galaxy/data/default/role/templates/.git_keep new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/lib/ansible/galaxy/data/default/role/templates/.git_keep diff --git a/lib/ansible/galaxy/data/default/role/tests/inventory b/lib/ansible/galaxy/data/default/role/tests/inventory new file mode 100644 index 0000000..878877b --- /dev/null +++ b/lib/ansible/galaxy/data/default/role/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/lib/ansible/galaxy/data/default/role/tests/test.yml.j2 b/lib/ansible/galaxy/data/default/role/tests/test.yml.j2 new file mode 100644 index 0000000..0c40f95 --- /dev/null +++ b/lib/ansible/galaxy/data/default/role/tests/test.yml.j2 @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - {{ role_name }} diff --git a/lib/ansible/galaxy/data/default/role/vars/main.yml.j2 b/lib/ansible/galaxy/data/default/role/vars/main.yml.j2 new file mode 100644 index 0000000..092d511 --- /dev/null +++ b/lib/ansible/galaxy/data/default/role/vars/main.yml.j2 @@ -0,0 +1,2 @@ +--- +# vars file for {{ role_name }} diff --git a/lib/ansible/galaxy/data/network/README.md b/lib/ansible/galaxy/data/network/README.md new file mode 100644 index 0000000..84533c6 --- /dev/null +++ b/lib/ansible/galaxy/data/network/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses any vendor specific SDKs or module with specific dependencies, it may be a good idea to mention in this section that the package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). diff --git a/lib/ansible/galaxy/data/network/cliconf_plugins/example.py.j2 b/lib/ansible/galaxy/data/network/cliconf_plugins/example.py.j2 new file mode 100644 index 0000000..02f234a --- /dev/null +++ b/lib/ansible/galaxy/data/network/cliconf_plugins/example.py.j2 @@ -0,0 +1,40 @@ +# +# (c) 2018 Red Hat Inc. 
+# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# +from __future__ import (absolute_import, division, print_function) +from ansible.errors import AnsibleError +__metaclass__ = type + +try: + from ansible.plugins.cliconf import CliconfBase + """ + Examples: + https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/cliconf/iosxr.py + https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/cliconf/junos.py + """ +except ImportError: + raise AnsibleError("Cliconf Plugin [ {{ role_name }} ]: Dependency not satisfied") + + +class Cliconf(CliconfBase): + """ + Examples: + https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/cliconf/iosxr.py + https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/cliconf/junos.py + """ + raise AnsibleError("Cliconf Plugin [ {{ role_name }} ]: Not implemented") diff --git a/lib/ansible/galaxy/data/network/defaults/main.yml.j2 b/lib/ansible/galaxy/data/network/defaults/main.yml.j2 new file mode 100644 index 0000000..3818e64 --- /dev/null +++ b/lib/ansible/galaxy/data/network/defaults/main.yml.j2 @@ -0,0 +1,2 @@ +--- +# defaults file for {{ role_name }} diff --git a/lib/ansible/galaxy/data/network/files/.git_keep b/lib/ansible/galaxy/data/network/files/.git_keep new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/lib/ansible/galaxy/data/network/files/.git_keep diff --git a/lib/ansible/galaxy/data/network/library/example_command.py.j2 b/lib/ansible/galaxy/data/network/library/example_command.py.j2 new file mode 100644 index 0000000..0f3dac2 --- /dev/null +++ b/lib/ansible/galaxy/data/network/library/example_command.py.j2 @@ -0,0 +1,66 @@ +# +# (c) 2018 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
+# +from __future__ import (absolute_import, division, print_function) +from ansible.errors import AnsibleError +__metaclass__ = type + + +### Documentation +DOCUMENTATION = """ + Examples: + https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_command.py + https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_command.py +""" + +EXAMPLES = """ + Examples: + https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_command.py + https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_command.py +""" + + +RETURN = """ + Examples: + https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_command.py + https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_command.py +""" + +#### Imports +try: + from ansible.module_utils.basic import AnsibleModule + """ + Examples: + https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_command.py + https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_command.py + """ +except ImportError: + raise AnsibleError("[ {{ role_name }}_command ]: Dependency not satisfied") + +#### Implementation +def main(): + """ + Examples: + https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_command.py + https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_command.py + """ + raise AnsibleError(" [ {{ role_name }}_command ]: Not Implemented") + +#### Entrypoint +if __name__ == '__main__': + main() diff --git a/lib/ansible/galaxy/data/network/library/example_config.py.j2 b/lib/ansible/galaxy/data/network/library/example_config.py.j2 new file mode 100644 index 0000000..2c2c72b --- /dev/null +++ b/lib/ansible/galaxy/data/network/library/example_config.py.j2 @@ -0,0 +1,66 @@ +# +# (c) 2018 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
+# +from __future__ import (absolute_import, division, print_function) +from ansible.errors import AnsibleError +__metaclass__ = type + + +### Documentation +DOCUMENTATION = """ + Examples: + https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_config.py + https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_config.py +""" + +EXAMPLES = """ + Examples: + https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_config.py + https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_config.py +""" + + +RETURN = """ + Examples: + https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_config.py + https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_config.py +""" + +### Imports +try: + from ansible.module_utils.basic import AnsibleModule + """ + Examples: + https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_config.py + https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_config.py + """ +except ImportError: + raise AnsibleError("[ {{ role_name }}_config ]: Dependency not satisfied") + +### Implementation +def main(): + """ + Examples: + https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_config.py + https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_config.py + """ + raise AnsibleError(" [ {{ role_name }}_config ]: Not Implemented") + +### Entrypoint +if __name__ == '__main__': + main() diff --git a/lib/ansible/galaxy/data/network/library/example_facts.py.j2 b/lib/ansible/galaxy/data/network/library/example_facts.py.j2 new file mode 100644 index 0000000..9f7608c --- /dev/null +++ b/lib/ansible/galaxy/data/network/library/example_facts.py.j2 @@ -0,0 +1,66 @@ +# +# (c) 2018 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
+#
+from __future__ import (absolute_import, division, print_function)
+from ansible.errors import AnsibleError
+__metaclass__ = type
+
+
+### Documentation
+DOCUMENTATION = """
+    Examples:
+        https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_facts.py
+        https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_facts.py
+"""
+
+EXAMPLES = """
+    Examples:
+        https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_facts.py
+        https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_facts.py
+"""
+
+
+RETURN = """
+    Examples:
+        https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_facts.py
+        https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_facts.py
+"""
+
+### Imports
+try:
+    from ansible.module_utils.basic import AnsibleModule
+    """
+    Examples:
+        https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_facts.py
+        https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_facts.py
+    """
+except ImportError:
+    raise AnsibleError("[ {{ role_name }}_facts ]: Dependency not satisfied")
+
+### Implementation
+def main():
+    """
+    Examples:
+        https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_facts.py
+        https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_facts.py
+    """
+    raise AnsibleError(" [ {{ role_name }}_facts ]: Not Implemented")
+
+### Entrypoint
+if __name__ == '__main__':
+    main()
diff --git a/lib/ansible/galaxy/data/network/meta/main.yml.j2 b/lib/ansible/galaxy/data/network/meta/main.yml.j2
new file mode 100644
index 0000000..d0184ae
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/meta/main.yml.j2
@@ -0,0 +1,52 @@
+galaxy_info:
+  author: {{ author }}
+  description: {{ description }}
+  company: {{ company }}
+
+  # If the issue tracker for your role is not on github, uncomment the
+  # next line and provide a value
+  # issue_tracker_url: {{ issue_tracker_url }}
+
+  # Choose a valid license ID from https://spdx.org - some suggested licenses:
+  # - BSD-3-Clause (default)
+  # - MIT
+  # - GPL-2.0-or-later
+  # - GPL-3.0-only
+  # - Apache-2.0
+  # - CC-BY-4.0
+  license: {{ license }}
+
+  min_ansible_version: {{ min_ansible_version }}
+
+  # If this is a Container Enabled role, provide the minimum Ansible Container version.
+  # min_ansible_container_version:
+
+  #
+  # platforms is a list of platforms, and each platform has a name and a list of versions.
+  #
+  # platforms:
+  # - name: VYOS
+  #   versions:
+  #   - all
+  #   - 25
+  # - name: SomePlatform
+  #   versions:
+  #   - all
+  #   - 1.0
+  #   - 7
+  #   - 99.99
+
+  galaxy_tags: []
+  # List tags for your role here, one per line. A tag is a keyword that describes
+  # and categorizes the role. Users find roles by searching for tags. Be sure to
+  # remove the '[]' above, if you add tags to this list.
+  #
+  # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
+  #       Maximum 20 tags per role.
+
+dependencies: []
+  # List your role dependencies here, one per line. Be sure to remove the '[]' above,
+  # if you add dependencies to this list.
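+  # An illustrative, hypothetical entry (adapt or delete):
+  #- username.some_dependency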
+{%- for dependency in dependencies %}
+  #- {{ dependency }}
+{%- endfor %}
diff --git a/lib/ansible/galaxy/data/network/module_utils/example.py.j2 b/lib/ansible/galaxy/data/network/module_utils/example.py.j2
new file mode 100644
index 0000000..9bf2d3f
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/module_utils/example.py.j2
@@ -0,0 +1,40 @@
+#
+# (c) 2018 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import (absolute_import, division, print_function)
+from ansible.errors import AnsibleError
+__metaclass__ = type
+
+### Imports
+try:
+    from ansible.module_utils.basic import env_fallback, return_values
+    from ansible.module_utils.connection import Connection
+    """
+    Examples:
+        https://github.com/ansible/ansible/blob/devel/lib/ansible/module_utils/network/iosxr/iosxr.py
+        https://github.com/ansible/ansible/blob/devel/lib/ansible/module_utils/network/junos/junos.py
+    """
+except ImportError:
+    raise AnsibleError("Module Utils [ {{ role_name }} ]: Dependency not satisfied")
+
+### Implementation
+"""
+    Examples:
+        https://github.com/ansible/ansible/blob/devel/lib/ansible/module_utils/network/iosxr/iosxr.py
+        https://github.com/ansible/ansible/blob/devel/lib/ansible/module_utils/network/junos/junos.py
+"""
diff --git a/lib/ansible/galaxy/data/network/netconf_plugins/example.py.j2 b/lib/ansible/galaxy/data/network/netconf_plugins/example.py.j2
new file mode 100644
index 0000000..e3a1ce6
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/netconf_plugins/example.py.j2
@@ -0,0 +1,40 @@
+#
+# (c) 2018 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import (absolute_import, division, print_function)
+from ansible.errors import AnsibleError
+__metaclass__ = type
+
+try:
+    from ansible.plugins.netconf import NetconfBase
+    """
+    Examples:
+        https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/netconf/iosxr.py
+        https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/netconf/junos.py
+    """
+except ImportError:
+    raise AnsibleError("Netconf Plugin [ {{ role_name }} ]: Dependency not satisfied")
+
+
+class Netconf(NetconfBase):
+    """
+    Examples:
+        https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/netconf/iosxr.py
+        https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/netconf/junos.py
+    """
+    raise AnsibleError("Netconf Plugin [ {{ role_name }} ]: Not implemented")
diff --git a/lib/ansible/galaxy/data/network/tasks/main.yml.j2 b/lib/ansible/galaxy/data/network/tasks/main.yml.j2
new file mode 100644
index 0000000..a988065
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/tasks/main.yml.j2
@@ -0,0 +1,2 @@
+---
+# tasks file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/network/templates/.git_keep b/lib/ansible/galaxy/data/network/templates/.git_keep
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/templates/.git_keep
diff --git a/lib/ansible/galaxy/data/network/terminal_plugins/example.py.j2 b/lib/ansible/galaxy/data/network/terminal_plugins/example.py.j2
new file mode 100644
index 0000000..621a140
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/terminal_plugins/example.py.j2
@@ -0,0 +1,40 @@
+#
+# (c) 2018 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+# +from __future__ import (absolute_import, division, print_function) +from ansible.errors import AnsibleError +__metaclass__ = type + +try: + from ansible.plugins.terminal import TerminalBase + """ + Examples: + https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/terminal/iosxr.py + https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/terminal/junos.py + """ +except ImportError: + raise AnsibleError("Terminal Plugin [ {{ role_name }} ]: Dependency not satisfied") + + +class TerminalModule(TerminalBase): + """ + Examples: + https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/terminal/iosxr.py + https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/terminal/junos.py + """ + raise AnsibleError("Terminal Plugin [ {{ role_name }} ]: Not implemented") diff --git a/lib/ansible/galaxy/data/network/tests/inventory b/lib/ansible/galaxy/data/network/tests/inventory new file mode 100644 index 0000000..878877b --- /dev/null +++ b/lib/ansible/galaxy/data/network/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/lib/ansible/galaxy/data/network/tests/test.yml.j2 b/lib/ansible/galaxy/data/network/tests/test.yml.j2 new file mode 100644 index 0000000..11284eb --- /dev/null +++ b/lib/ansible/galaxy/data/network/tests/test.yml.j2 @@ -0,0 +1,14 @@ +--- +- hosts: localhost + connection: network_cli + gather_facts: False + + roles: + - {{ role_name }} + +- hosts: localhost + connection: netconf + gather_facts: False + + roles: + - {{ role_name }} diff --git a/lib/ansible/galaxy/data/network/vars/main.yml.j2 b/lib/ansible/galaxy/data/network/vars/main.yml.j2 new file mode 100644 index 0000000..092d511 --- /dev/null +++ b/lib/ansible/galaxy/data/network/vars/main.yml.j2 @@ -0,0 +1,2 @@ +--- +# vars file for {{ role_name }} diff --git a/lib/ansible/galaxy/dependency_resolution/__init__.py b/lib/ansible/galaxy/dependency_resolution/__init__.py new file mode 100644 index 0000000..cfde7df --- /dev/null +++ b/lib/ansible/galaxy/dependency_resolution/__init__.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2020-2021, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +"""Dependency resolution machinery.""" + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import typing as t + +if t.TYPE_CHECKING: + from ansible.galaxy.api import GalaxyAPI + from ansible.galaxy.collection.concrete_artifact_manager import ( + ConcreteArtifactsManager, + ) + from ansible.galaxy.dependency_resolution.dataclasses import ( + Candidate, + Requirement, + ) + +from ansible.galaxy.collection.galaxy_api_proxy import MultiGalaxyAPIProxy +from ansible.galaxy.dependency_resolution.providers import CollectionDependencyProvider +from ansible.galaxy.dependency_resolution.reporters import CollectionDependencyReporter +from ansible.galaxy.dependency_resolution.resolvers import CollectionDependencyResolver + + +def build_collection_dependency_resolver( + galaxy_apis, # type: t.Iterable[GalaxyAPI] + concrete_artifacts_manager, # type: ConcreteArtifactsManager + user_requirements, # type: t.Iterable[Requirement] + preferred_candidates=None, # type: t.Iterable[Candidate] + with_deps=True, # type: bool + with_pre_releases=False, # type: bool + upgrade=False, # type: bool + include_signatures=True, # type: bool + offline=False, # type: bool +): # type: (...) -> CollectionDependencyResolver + """Return a collection dependency resolver. 
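+
+    A minimal usage sketch (the variable names here are assumed for
+    illustration only; ``resolve()`` is provided by the underlying
+    ``resolvelib.Resolver``)::
+
+        resolver = build_collection_dependency_resolver(
+            galaxy_apis=[galaxy_api],  # hypothetical GalaxyAPI instance(s)
+            concrete_artifacts_manager=artifacts_manager,
+            user_requirements=requirements,
+        )
+        resolution = resolver.resolve(requirements)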
+
+    The returned instance will have a ``resolve()`` method for
+    further consumption.
+    """
+    return CollectionDependencyResolver(
+        CollectionDependencyProvider(
+            apis=MultiGalaxyAPIProxy(galaxy_apis, concrete_artifacts_manager, offline=offline),
+            concrete_artifacts_manager=concrete_artifacts_manager,
+            user_requirements=user_requirements,
+            preferred_candidates=preferred_candidates,
+            with_deps=with_deps,
+            with_pre_releases=with_pre_releases,
+            upgrade=upgrade,
+            include_signatures=include_signatures,
+        ),
+        CollectionDependencyReporter(),
+    )
diff --git a/lib/ansible/galaxy/dependency_resolution/dataclasses.py b/lib/ansible/galaxy/dependency_resolution/dataclasses.py
new file mode 100644
index 0000000..16fd631
--- /dev/null
+++ b/lib/ansible/galaxy/dependency_resolution/dataclasses.py
@@ -0,0 +1,573 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020-2021, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""Dependency structs."""
+# FIXME: add caching all over the place
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import typing as t
+
+from collections import namedtuple
+from collections.abc import MutableSequence, MutableMapping
+from glob import iglob
+from urllib.parse import urlparse
+from yaml import safe_load
+
+if t.TYPE_CHECKING:
+    from ansible.galaxy.collection.concrete_artifact_manager import (
+        ConcreteArtifactsManager,
+    )
+    Collection = t.TypeVar(
+        'Collection',
+        'Candidate', 'Requirement',
+        '_ComputedReqKindsMixin',
+    )
+
+
+from ansible.errors import AnsibleError
+from ansible.galaxy.api import GalaxyAPI
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.common.arg_spec import ArgumentSpecValidator
+from ansible.utils.collection_loader import AnsibleCollectionRef
+from ansible.utils.display import Display
+
+
+_ALLOW_CONCRETE_POINTER_IN_SOURCE = False  # NOTE: This is a feature flag
+_GALAXY_YAML = b'galaxy.yml'
+_MANIFEST_JSON = b'MANIFEST.json'
+_SOURCE_METADATA_FILE = b'GALAXY.yml'
+
+display = Display()
+
+
+def get_validated_source_info(b_source_info_path, namespace, name, version):
+    source_info_path = to_text(b_source_info_path, errors='surrogate_or_strict')
+
+    if not os.path.isfile(b_source_info_path):
+        return None
+
+    try:
+        with open(b_source_info_path, mode='rb') as fd:
+            metadata = safe_load(fd)
+    except OSError as e:
+        display.warning(
+            f"Error getting collection source information at '{source_info_path}': {to_text(e, errors='surrogate_or_strict')}"
+        )
+        return None
+
+    if not isinstance(metadata, MutableMapping):
+        display.warning(f"Error getting collection source information at '{source_info_path}': expected a YAML dictionary")
+        return None
+
+    schema_errors = _validate_v1_source_info_schema(namespace, name, version, metadata)
+    if schema_errors:
+        display.warning(f"Ignoring source metadata file at {source_info_path} due to the following errors:")
+        display.warning("\n".join(schema_errors))
+        display.warning("Correct the source metadata file by reinstalling the collection.")
+        return None
+
+    return metadata
+
+
+def _validate_v1_source_info_schema(namespace, name, version, provided_arguments):
+    argument_spec_data = dict(
+        format_version=dict(choices=["1.0.0"]),
+        download_url=dict(),
+        version_url=dict(),
+        server=dict(),
+        signatures=dict(
+            type=list,
+            suboptions=dict(
+                signature=dict(),
+                pubkey_fingerprint=dict(),
+                signing_service=dict(),
+                pulp_created=dict(),
+            )
+        ),
+        name=dict(choices=[name]),
+        namespace=dict(choices=[namespace]),
+        version=dict(choices=[version]),
+    )
+
+    if not isinstance(provided_arguments, dict):
+        raise AnsibleError(
+            f'Invalid offline source info for {namespace}.{name}:{version}, expected a dict and got {type(provided_arguments)}'
+        )
+    validator = ArgumentSpecValidator(argument_spec_data)
+    validation_result = validator.validate(provided_arguments)
+
+    return validation_result.error_messages
+
+
+def _is_collection_src_dir(dir_path):
+    b_dir_path = to_bytes(dir_path, errors='surrogate_or_strict')
+    return os.path.isfile(os.path.join(b_dir_path, _GALAXY_YAML))
+
+
+def _is_installed_collection_dir(dir_path):
+    b_dir_path = to_bytes(dir_path, errors='surrogate_or_strict')
+    return os.path.isfile(os.path.join(b_dir_path, _MANIFEST_JSON))
+
+
+def _is_collection_dir(dir_path):
+    return (
+        _is_installed_collection_dir(dir_path) or
+        _is_collection_src_dir(dir_path)
+    )
+
+
+def _find_collections_in_subdirs(dir_path):
+    b_dir_path = to_bytes(dir_path, errors='surrogate_or_strict')
+
+    subdir_glob_pattern = os.path.join(
+        b_dir_path,
+        # b'*',  # namespace is supposed to be top-level per spec
+        b'*',  # collection name
+    )
+
+    for subdir in iglob(subdir_glob_pattern):
+        if os.path.isfile(os.path.join(subdir, _MANIFEST_JSON)):
+            yield subdir
+        elif os.path.isfile(os.path.join(subdir, _GALAXY_YAML)):
+            yield subdir
+
+
+def _is_collection_namespace_dir(tested_str):
+    return any(_find_collections_in_subdirs(tested_str))
+
+
+def _is_file_path(tested_str):
+    return os.path.isfile(to_bytes(tested_str, errors='surrogate_or_strict'))
+
+
+def _is_http_url(tested_str):
+    return urlparse(tested_str).scheme.lower() in {'http', 'https'}
+
+
+def _is_git_url(tested_str):
+    return tested_str.startswith(('git+', 'git@'))
+
+
+def _is_concrete_artifact_pointer(tested_str):
+    return any(
+        predicate(tested_str)
+        for predicate in (
+            # NOTE: Maintain the checks to be sorted from light to heavy:
+            _is_git_url,
+            _is_http_url,
+            _is_file_path,
+            _is_collection_dir,
+            _is_collection_namespace_dir,
+        )
+    )
+
+
+class _ComputedReqKindsMixin:
+
+    def __init__(self, *args, **kwargs):
+        if not self.may_have_offline_galaxy_info:
+            self._source_info = None
+        else:
+            info_path = self.construct_galaxy_info_path(to_bytes(self.src, errors='surrogate_or_strict'))
+
+            self._source_info = get_validated_source_info(
+                info_path,
+                self.namespace,
+                self.name,
+                self.ver
+            )
+
+    @classmethod
+    def from_dir_path_as_unknown(  # type: ignore[misc]
+            cls,  # type: t.Type[Collection]
+            dir_path,  # type: bytes
+            art_mgr,  # type: ConcreteArtifactsManager
+    ):  # type: (...) -> Collection
+        """Make collection from an unspecified dir type.
+
+        This alternative constructor attempts to grab metadata from the
+        given path if it's a directory. If there's no metadata, it
+        falls back to guessing the FQCN based on the directory path and
+        sets the version to "*".
+
+        It raises a ValueError immediately if the input is not an
+        existing directory path.
+        """
+        if not os.path.isdir(dir_path):
+            raise ValueError(
+                "The collection directory '{path!s}' doesn't exist".
+                format(path=to_native(dir_path)),
+            )
+
+        try:
+            return cls.from_dir_path(dir_path, art_mgr)
+        except ValueError:
+            return cls.from_dir_path_implicit(dir_path)
+
+    @classmethod
+    def from_dir_path(cls, dir_path, art_mgr):
+        """Make collection from a directory with metadata."""
+        b_dir_path = to_bytes(dir_path, errors='surrogate_or_strict')
+        if not _is_collection_dir(b_dir_path):
+            display.warning(
+                u"Collection at '{path!s}' does not have a {manifest_json!s} "
+                u'file, nor a {galaxy_yml!s} file: cannot detect version.'.
+                format(
+                    galaxy_yml=to_text(_GALAXY_YAML),
+                    manifest_json=to_text(_MANIFEST_JSON),
+                    path=to_text(dir_path, errors='surrogate_or_strict'),
+                ),
+            )
+            raise ValueError(
+                '`dir_path` argument must be an installed or a source'
+                ' collection directory.',
+            )
+
+        tmp_inst_req = cls(None, None, dir_path, 'dir', None)
+        req_version = art_mgr.get_direct_collection_version(tmp_inst_req)
+        try:
+            req_name = art_mgr.get_direct_collection_fqcn(tmp_inst_req)
+        except TypeError as err:
+            # Looks like installed/source dir but isn't: doesn't have valid metadata.
+            display.warning(
+                u"Collection at '{path!s}' has a {manifest_json!s} "
+                u"or {galaxy_yml!s} file but it contains invalid metadata.".
+                format(
+                    galaxy_yml=to_text(_GALAXY_YAML),
+                    manifest_json=to_text(_MANIFEST_JSON),
+                    path=to_text(dir_path, errors='surrogate_or_strict'),
+                ),
+            )
+            raise ValueError(
+                "Collection at '{path!s}' has invalid metadata".
+                format(path=to_text(dir_path, errors='surrogate_or_strict'))
+            ) from err
+
+        return cls(req_name, req_version, dir_path, 'dir', None)
+
+    @classmethod
+    def from_dir_path_implicit(  # type: ignore[misc]
+            cls,  # type: t.Type[Collection]
+            dir_path,  # type: bytes
+    ):  # type: (...) -> Collection
+        """Construct a collection instance based on an arbitrary dir.
+
+        This alternative constructor infers the FQCN based on the parent
+        and current directory names. It also sets the version to "*"
+        regardless of whether any of the known metadata files are present.
+        """
+        # There is no metadata, but it isn't required for a functional collection. Determine the namespace.name from the path.
+        u_dir_path = to_text(dir_path, errors='surrogate_or_strict')
+        path_list = u_dir_path.split(os.path.sep)
+        req_name = '.'.join(path_list[-2:])
+        return cls(req_name, '*', dir_path, 'dir', None)  # type: ignore[call-arg]
+
+    @classmethod
+    def from_string(cls, collection_input, artifacts_manager, supplemental_signatures):
+        req = {}
+        if _is_concrete_artifact_pointer(collection_input):
+            # Arg is a file path or URL to a collection
+            req['name'] = collection_input
+        else:
+            req['name'], _sep, req['version'] = collection_input.partition(':')
+            if not req['version']:
+                del req['version']
+        req['signatures'] = supplemental_signatures
+
+        return cls.from_requirement_dict(req, artifacts_manager)
+
+    @classmethod
+    def from_requirement_dict(cls, collection_req, art_mgr, validate_signature_options=True):
+        req_name = collection_req.get('name', None)
+        req_version = collection_req.get('version', '*')
+        req_type = collection_req.get('type')
+        # TODO: decide how to deprecate the old src API behavior
+        req_source = collection_req.get('source', None)
+        req_signature_sources = collection_req.get('signatures', None)
+        if req_signature_sources is not None:
+            if validate_signature_options and art_mgr.keyring is None:
+                raise AnsibleError(
+                    f"Signatures were provided to verify {req_name} but no keyring was configured."
+ ) + + if not isinstance(req_signature_sources, MutableSequence): + req_signature_sources = [req_signature_sources] + req_signature_sources = frozenset(req_signature_sources) + + if req_type is None: + if ( # FIXME: decide on the future behavior: + _ALLOW_CONCRETE_POINTER_IN_SOURCE + and req_source is not None + and _is_concrete_artifact_pointer(req_source) + ): + src_path = req_source + elif ( + req_name is not None + and AnsibleCollectionRef.is_valid_collection_name(req_name) + ): + req_type = 'galaxy' + elif ( + req_name is not None + and _is_concrete_artifact_pointer(req_name) + ): + src_path, req_name = req_name, None + else: + dir_tip_tmpl = ( # NOTE: leading LFs are for concat + '\n\nTip: Make sure you are pointing to the right ' + 'subdirectory — `{src!s}` looks like a directory ' + 'but it is neither a collection, nor a namespace ' + 'dir.' + ) + + if req_source is not None and os.path.isdir(req_source): + tip = dir_tip_tmpl.format(src=req_source) + elif req_name is not None and os.path.isdir(req_name): + tip = dir_tip_tmpl.format(src=req_name) + elif req_name: + tip = '\n\nCould not find {0}.'.format(req_name) + else: + tip = '' + + raise AnsibleError( # NOTE: I'd prefer a ValueError instead + 'Neither the collection requirement entry key ' + "'name', nor 'source' point to a concrete " + "resolvable collection artifact. Also 'name' is " + 'not an FQCN. A valid collection name must be in ' + 'the format <namespace>.<collection>. Please make ' + 'sure that the namespace and the collection name ' + 'contain characters from [a-zA-Z0-9_] only.' + '{extra_tip!s}'.format(extra_tip=tip), + ) + + if req_type is None: + if _is_git_url(src_path): + req_type = 'git' + req_source = src_path + elif _is_http_url(src_path): + req_type = 'url' + req_source = src_path + elif _is_file_path(src_path): + req_type = 'file' + req_source = src_path + elif _is_collection_dir(src_path): + if _is_installed_collection_dir(src_path) and _is_collection_src_dir(src_path): + # Note that ``download`` requires a dir with a ``galaxy.yml`` and fails if it + # doesn't exist, but if a ``MANIFEST.json`` also exists, it would be used + # instead of the ``galaxy.yml``. + raise AnsibleError( + u"Collection requirement at '{path!s}' has both a {manifest_json!s} " + u"file and a {galaxy_yml!s}.\nThe requirement must either be an installed " + u"collection directory or a source collection directory, not both.". + format( + path=to_text(src_path, errors='surrogate_or_strict'), + manifest_json=to_text(_MANIFEST_JSON), + galaxy_yml=to_text(_GALAXY_YAML), + ) + ) + req_type = 'dir' + req_source = src_path + elif _is_collection_namespace_dir(src_path): + req_name = None # No name for a virtual req or "namespace."? + req_type = 'subdirs' + req_source = src_path + else: + raise AnsibleError( # NOTE: this is never supposed to be hit + 'Failed to automatically detect the collection ' + 'requirement type.', + ) + + if req_type not in {'file', 'galaxy', 'git', 'url', 'dir', 'subdirs'}: + raise AnsibleError( + "The collection requirement entry key 'type' must be " + 'one of file, galaxy, git, dir, subdirs, or url.' 
+ ) + + if req_name is None and req_type == 'galaxy': + raise AnsibleError( + 'Collections requirement entry should contain ' + "the key 'name' if it's requested from a Galaxy-like " + 'index server.', + ) + + if req_type != 'galaxy' and req_source is None: + req_source, req_name = req_name, None + + if ( + req_type == 'galaxy' and + isinstance(req_source, GalaxyAPI) and + not _is_http_url(req_source.api_server) + ): + raise AnsibleError( + "Collections requirement 'source' entry should contain " + 'a valid Galaxy API URL but it does not: {not_url!s} ' + 'is not an HTTP URL.'. + format(not_url=req_source.api_server), + ) + + tmp_inst_req = cls(req_name, req_version, req_source, req_type, req_signature_sources) + + if req_type not in {'galaxy', 'subdirs'} and req_name is None: + req_name = art_mgr.get_direct_collection_fqcn(tmp_inst_req) # TODO: fix the cache key in artifacts manager? + + if req_type not in {'galaxy', 'subdirs'} and req_version == '*': + req_version = art_mgr.get_direct_collection_version(tmp_inst_req) + + return cls( + req_name, req_version, + req_source, req_type, + req_signature_sources, + ) + + def __repr__(self): + return ( + '<{self!s} of type {coll_type!r} from {src!s}>'. + format(self=self, coll_type=self.type, src=self.src or 'Galaxy') + ) + + def __str__(self): + return to_native(self.__unicode__()) + + def __unicode__(self): + if self.fqcn is None: + return ( + u'"virtual collection Git repo"' if self.is_scm + else u'"virtual collection namespace"' + ) + + return ( + u'{fqcn!s}:{ver!s}'. + format(fqcn=to_text(self.fqcn), ver=to_text(self.ver)) + ) + + @property + def may_have_offline_galaxy_info(self): + if self.fqcn is None: + # Virtual collection + return False + elif not self.is_dir or self.src is None or not _is_collection_dir(self.src): + # Not a dir or isn't on-disk + return False + return True + + def construct_galaxy_info_path(self, b_collection_path): + if not self.may_have_offline_galaxy_info and not self.type == 'galaxy': + raise TypeError('Only installed collections from a Galaxy server have offline Galaxy info') + + # Store Galaxy metadata adjacent to the namespace of the collection + # Chop off the last two parts of the path (/ns/coll) to get the dir containing the ns + b_src = to_bytes(b_collection_path, errors='surrogate_or_strict') + b_path_parts = b_src.split(to_bytes(os.path.sep))[0:-2] + b_metadata_dir = to_bytes(os.path.sep).join(b_path_parts) + + # ns.coll-1.0.0.info + b_dir_name = to_bytes(f"{self.namespace}.{self.name}-{self.ver}.info", errors="surrogate_or_strict") + + # collections/ansible_collections/ns.coll-1.0.0.info/GALAXY.yml + return os.path.join(b_metadata_dir, b_dir_name, _SOURCE_METADATA_FILE) + + def _get_separate_ns_n_name(self): # FIXME: use LRU cache + return self.fqcn.split('.') + + @property + def namespace(self): + if self.is_virtual: + raise TypeError('Virtual collections do not have a namespace') + + return self._get_separate_ns_n_name()[0] + + @property + def name(self): + if self.is_virtual: + raise TypeError('Virtual collections do not have a name') + + return self._get_separate_ns_n_name()[-1] + + @property + def canonical_package_id(self): + if not self.is_virtual: + return to_native(self.fqcn) + + return ( + '<virtual namespace from {src!s} of type {src_type!s}>'. 
+            format(src=to_native(self.src), src_type=to_native(self.type))
+        )
+
+    @property
+    def is_virtual(self):
+        return self.is_scm or self.is_subdirs
+
+    @property
+    def is_file(self):
+        return self.type == 'file'
+
+    @property
+    def is_dir(self):
+        return self.type == 'dir'
+
+    @property
+    def namespace_collection_paths(self):
+        return [
+            to_native(path)
+            for path in _find_collections_in_subdirs(self.src)
+        ]
+
+    @property
+    def is_subdirs(self):
+        return self.type == 'subdirs'
+
+    @property
+    def is_url(self):
+        return self.type == 'url'
+
+    @property
+    def is_scm(self):
+        return self.type == 'git'
+
+    @property
+    def is_concrete_artifact(self):
+        return self.type in {'git', 'url', 'file', 'dir', 'subdirs'}
+
+    @property
+    def is_online_index_pointer(self):
+        return not self.is_concrete_artifact
+
+    @property
+    def source_info(self):
+        return self._source_info
+
+
+RequirementNamedTuple = namedtuple('Requirement', ('fqcn', 'ver', 'src', 'type', 'signature_sources'))  # type: ignore[name-match]
+
+
+CandidateNamedTuple = namedtuple('Candidate', ('fqcn', 'ver', 'src', 'type', 'signatures'))  # type: ignore[name-match]
+
+
+class Requirement(
+        _ComputedReqKindsMixin,
+        RequirementNamedTuple,
+):
+    """An abstract requirement request."""
+
+    def __new__(cls, *args, **kwargs):
+        self = RequirementNamedTuple.__new__(cls, *args, **kwargs)
+        return self
+
+    def __init__(self, *args, **kwargs):
+        super(Requirement, self).__init__()
+
+
+class Candidate(
+        _ComputedReqKindsMixin,
+        CandidateNamedTuple,
+):
+    """A concrete collection candidate with its version resolved."""
+
+    def __new__(cls, *args, **kwargs):
+        self = CandidateNamedTuple.__new__(cls, *args, **kwargs)
+        return self
+
+    def __init__(self, *args, **kwargs):
+        super(Candidate, self).__init__()
diff --git a/lib/ansible/galaxy/dependency_resolution/errors.py b/lib/ansible/galaxy/dependency_resolution/errors.py
new file mode 100644
index 0000000..ae3b439
--- /dev/null
+++ b/lib/ansible/galaxy/dependency_resolution/errors.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020-2021, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""Dependency resolution exceptions."""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+    from resolvelib.resolvers import (
+        ResolutionImpossible as CollectionDependencyResolutionImpossible,
+        InconsistentCandidate as CollectionDependencyInconsistentCandidate,
+    )
+except ImportError:
+    class CollectionDependencyResolutionImpossible(Exception):  # type: ignore[no-redef]
+        pass
+
+    class CollectionDependencyInconsistentCandidate(Exception):  # type: ignore[no-redef]
+        pass
diff --git a/lib/ansible/galaxy/dependency_resolution/providers.py b/lib/ansible/galaxy/dependency_resolution/providers.py
new file mode 100644
index 0000000..817a1eb
--- /dev/null
+++ b/lib/ansible/galaxy/dependency_resolution/providers.py
@@ -0,0 +1,548 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020-2021, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""Requirement provider interfaces."""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import functools
+import typing as t
+
+if t.TYPE_CHECKING:
+    from ansible.galaxy.collection.concrete_artifact_manager import (
+        ConcreteArtifactsManager,
+    )
+    from ansible.galaxy.collection.galaxy_api_proxy import MultiGalaxyAPIProxy
+    from ansible.galaxy.api import GalaxyAPI
+
+from ansible.galaxy.collection.gpg import get_signature_from_source
+from ansible.galaxy.dependency_resolution.dataclasses import (
+    Candidate,
+    Requirement,
+)
+from ansible.galaxy.dependency_resolution.versioning import (
+    is_pre_release,
+    meets_requirements,
+)
+from ansible.module_utils.six import string_types
+from ansible.utils.version import SemanticVersion, LooseVersion
+
+from collections.abc import Set
+
+try:
+    from resolvelib import AbstractProvider
+    from resolvelib import __version__ as resolvelib_version
+except ImportError:
+    class AbstractProvider:  # type: ignore[no-redef]
+        pass
+
+    resolvelib_version = '0.0.0'
+
+
+# TODO: add python requirements to ansible-test's ansible-core distribution info and remove the hardcoded lowerbound/upperbound fallback
+RESOLVELIB_LOWERBOUND = SemanticVersion("0.5.3")
+RESOLVELIB_UPPERBOUND = SemanticVersion("0.9.0")
+RESOLVELIB_VERSION = SemanticVersion.from_loose_version(LooseVersion(resolvelib_version))
+
+
+class PinnedCandidateRequests(Set):
+    """Custom set class to store Candidate objects. Excludes the 'signatures' attribute when determining if a Candidate instance is in the set."""
+    CANDIDATE_ATTRS = ('fqcn', 'ver', 'src', 'type')
+
+    def __init__(self, candidates):
+        self._candidates = set(candidates)
+
+    def __iter__(self):
+        return iter(self._candidates)
+
+    def __contains__(self, value):
+        if not isinstance(value, Candidate):
+            raise ValueError(f"Expected a Candidate object but got {value!r}")
+        for candidate in self._candidates:
+            # Compare Candidate attributes excluding "signatures" since it is
+            # unrelated to whether or not a matching Candidate is user-requested.
+            # Candidate objects in the set are not expected to have signatures.
+            for attr in PinnedCandidateRequests.CANDIDATE_ATTRS:
+                if getattr(value, attr) != getattr(candidate, attr):
+                    break
+            else:
+                return True
+        return False
+
+    def __len__(self):
+        return len(self._candidates)
+
+
+class CollectionDependencyProviderBase(AbstractProvider):
+    """Delegate providing a requirement interface for the resolver."""
+
+    def __init__(
+            self,  # type: CollectionDependencyProviderBase
+            apis,  # type: MultiGalaxyAPIProxy
+            concrete_artifacts_manager=None,  # type: ConcreteArtifactsManager
+            user_requirements=None,  # type: t.Iterable[Requirement]
+            preferred_candidates=None,  # type: t.Iterable[Candidate]
+            with_deps=True,  # type: bool
+            with_pre_releases=False,  # type: bool
+            upgrade=False,  # type: bool
+            include_signatures=True,  # type: bool
+    ):  # type: (...) -> None
+        r"""Initialize helper attributes.
+
+        :param apis: An instance of the multiple Galaxy APIs wrapper.
+
+        :param concrete_artifacts_manager: An instance of the caching \
+                                           concrete artifacts manager.
+
+        :param with_deps: A flag specifying whether the resolver \
+                          should attempt to pull-in the deps of the \
+                          requested requirements. On by default.
+
+        :param with_pre_releases: A flag specifying whether the \
+                                  resolver should include pre-release \
+                                  versions in the matches. Off by default.
+
+        :param upgrade: A flag specifying whether the resolver should \
+                        skip matching versions that are not upgrades. \
+                        Off by default.
+
+        :param include_signatures: A flag to determine whether to retrieve \
+                                   signatures from the Galaxy APIs and \
+                                   include signatures in matching Candidates. \
+                                   On by default.
+ """ + self._api_proxy = apis + self._make_req_from_dict = functools.partial( + Requirement.from_requirement_dict, + art_mgr=concrete_artifacts_manager, + ) + self._pinned_candidate_requests = PinnedCandidateRequests( + # NOTE: User-provided signatures are supplemental, so signatures + # NOTE: are not used to determine if a candidate is user-requested + Candidate(req.fqcn, req.ver, req.src, req.type, None) + for req in (user_requirements or ()) + if req.is_concrete_artifact or ( + req.ver != '*' and + not req.ver.startswith(('<', '>', '!=')) + ) + ) + self._preferred_candidates = set(preferred_candidates or ()) + self._with_deps = with_deps + self._with_pre_releases = with_pre_releases + self._upgrade = upgrade + self._include_signatures = include_signatures + + def _is_user_requested(self, candidate): # type: (Candidate) -> bool + """Check if the candidate is requested by the user.""" + if candidate in self._pinned_candidate_requests: + return True + + if candidate.is_online_index_pointer and candidate.src is not None: + # NOTE: Candidate is a namedtuple, it has a source server set + # NOTE: to a specific GalaxyAPI instance or `None`. When the + # NOTE: user runs + # NOTE: + # NOTE: $ ansible-galaxy collection install ns.coll + # NOTE: + # NOTE: then it's saved in `self._pinned_candidate_requests` + # NOTE: as `('ns.coll', '*', None, 'galaxy')` but then + # NOTE: `self.find_matches()` calls `self.is_satisfied_by()` + # NOTE: with Candidate instances bound to each specific + # NOTE: server available, those look like + # NOTE: `('ns.coll', '*', GalaxyAPI(...), 'galaxy')` and + # NOTE: wouldn't match the user requests saved in + # NOTE: `self._pinned_candidate_requests`. This is why we + # NOTE: normalize the collection to have `src=None` and try + # NOTE: again. + # NOTE: + # NOTE: When the user request comes from `requirements.yml` + # NOTE: with the `source:` set, it'll match the first check + # NOTE: but it still can have entries with `src=None` so this + # NOTE: normalized check is still necessary. + # NOTE: + # NOTE: User-provided signatures are supplemental, so signatures + # NOTE: are not used to determine if a candidate is user-requested + return Candidate( + candidate.fqcn, candidate.ver, None, candidate.type, None + ) in self._pinned_candidate_requests + + return False + + def identify(self, requirement_or_candidate): + # type: (t.Union[Candidate, Requirement]) -> str + """Given requirement or candidate, return an identifier for it. + + This is used to identify a requirement or candidate, e.g. + whether two requirements should have their specifier parts + (version ranges or pins) merged, whether two candidates would + conflict with each other (because they have same name but + different versions). + """ + return requirement_or_candidate.canonical_package_id + + def get_preference(self, *args, **kwargs): + # type: (t.Any, t.Any) -> t.Union[float, int] + """Return sort key function return value for given requirement. + + This result should be based on preference that is defined as + "I think this requirement should be resolved first". + The lower the return value is, the more preferred this + group of arguments is. + + resolvelib >=0.5.3, <0.7.0 + + :param resolution: Currently pinned candidate, or ``None``. + + :param candidates: A list of possible candidates. + + :param information: A list of requirement information. 
+
+        Each ``information`` instance is a named tuple with two entries:
+
+        * ``requirement`` specifies a requirement contributing to
+          the current candidate list
+
+        * ``parent`` specifies the candidate that provides
+          (depends on) the requirement, or ``None``
+          to indicate a root requirement.
+
+        resolvelib >=0.7.0, <0.8.0
+
+        :param identifier: The value returned by ``identify()``.
+
+        :param resolutions: Mapping of identifier, candidate pairs.
+
+        :param candidates: Possible candidates for the identifier.
+            Mapping of identifier, list of candidate pairs.
+
+        :param information: Requirement information of each package.
+            Mapping of identifier, list of named tuple pairs.
+            The named tuples have the entries ``requirement`` and ``parent``.
+
+        resolvelib >=0.8.0, <=0.8.1
+
+        :param identifier: The value returned by ``identify()``.
+
+        :param resolutions: Mapping of identifier, candidate pairs.
+
+        :param candidates: Possible candidates for the identifier.
+            Mapping of identifier, list of candidate pairs.
+
+        :param information: Requirement information of each package.
+            Mapping of identifier, list of named tuple pairs.
+            The named tuples have the entries ``requirement`` and ``parent``.
+
+        :param backtrack_causes: Sequence of requirement information that were
+            the requirements that caused the resolver to most recently backtrack.
+
+        The preference could depend on a variety of issues, including
+        (not necessarily in this order):
+
+        * Is this package pinned in the current resolution result?
+
+        * How relaxed is the requirement? Stricter ones should
+          probably be worked on first? (I don't know, actually.)
+
+        * How many possibilities are there to satisfy this
+          requirement? Those with few left should likely be worked on
+          first, I guess?
+
+        * Are there any known conflicts for this requirement?
+          We should probably work on those with the most
+          known conflicts.
+
+        A sortable value should be returned (this will be used as the
+        `key` parameter of the built-in sorting function). The smaller
+        the value is, the more preferred this requirement is (i.e. the
+        sorting function is called with ``reverse=False``).
+        """
+        raise NotImplementedError
+
+    def _get_preference(self, candidates):
+        # type: (list[Candidate]) -> t.Union[float, int]
+        if any(
+                candidate in self._preferred_candidates
+                for candidate in candidates
+        ):
+            # NOTE: Prefer pre-installed candidates over newer versions
+            # NOTE: available from Galaxy or other sources.
+            return float('-inf')
+        return len(candidates)
+
+    def find_matches(self, *args, **kwargs):
+        # type: (t.Any, t.Any) -> list[Candidate]
+        r"""Find all possible candidates satisfying given requirements.
+
+        This tries to get candidates based on the requirements' types.
+
+        For concrete requirements (SCM, dir, namespace dir, local or
+        remote archives), the one-and-only match is returned.
+
+        For a "named" requirement, Galaxy-compatible APIs are consulted
+        to find concrete candidates for this requirement. If there's a
+        pre-installed candidate, it's prepended in front of others.
+
+        resolvelib >=0.5.3, <0.6.0
+
+        :param requirements: A collection of requirements which all of \
+                             the returned candidates must match. \
+                             All requirements are guaranteed to have \
+                             the same identifier. \
+                             The collection is never empty.
+
+        resolvelib >=0.6.0
+
+        :param identifier: The value returned by ``identify()``.
+
+        :param requirements: The requirements all returned candidates must satisfy.
+            Mapping of identifier, iterator of requirement pairs.
+ + :param incompatibilities: Incompatible versions that must be excluded + from the returned list. + + :returns: An iterable that orders candidates by preference, \ + e.g. the most preferred candidate comes first. + """ + raise NotImplementedError + + def _find_matches(self, requirements): + # type: (list[Requirement]) -> list[Candidate] + # FIXME: The first requirement may be a Git repo followed by + # FIXME: its cloned tmp dir. Using only the first one creates + # FIXME: loops that prevent any further dependency exploration. + # FIXME: We need to figure out how to prevent this. + first_req = requirements[0] + fqcn = first_req.fqcn + # The fqcn is guaranteed to be the same + version_req = "A SemVer-compliant version or '*' is required. See https://semver.org to learn how to compose it correctly. " + version_req += "This is an issue with the collection." + + # If we're upgrading collections, we can't calculate preinstalled_candidates until the latest matches are found. + # Otherwise, we can potentially avoid a Galaxy API call by doing this first. + preinstalled_candidates = set() + if not self._upgrade and first_req.type == 'galaxy': + preinstalled_candidates = { + candidate for candidate in self._preferred_candidates + if candidate.fqcn == fqcn and + all(self.is_satisfied_by(requirement, candidate) for requirement in requirements) + } + try: + coll_versions = [] if preinstalled_candidates else self._api_proxy.get_collection_versions(first_req) # type: t.Iterable[t.Tuple[str, GalaxyAPI]] + except TypeError as exc: + if first_req.is_concrete_artifact: + # Non hashable versions will cause a TypeError + raise ValueError( + f"Invalid version found for the collection '{first_req}'. {version_req}" + ) from exc + # Unexpected error from a Galaxy server + raise + + if first_req.is_concrete_artifact: + # FIXME: do we assume that all the following artifacts are also concrete? + # FIXME: does using fqcn==None cause us problems here? + + # Ensure the version found in the concrete artifact is SemVer-compliant + for version, req_src in coll_versions: + version_err = f"Invalid version found for the collection '{first_req}': {version} ({type(version)}). {version_req}" + # NOTE: The known cases causing the version to be a non-string object come from + # NOTE: the differences in how the YAML parser normalizes ambiguous values and + # NOTE: how the end-users sometimes expect them to be parsed. Unless the users + # NOTE: explicitly use the double quotes of one of the multiline string syntaxes + # NOTE: in the collection metadata file, PyYAML will parse a value containing + # NOTE: two dot-separated integers as `float`, a single integer as `int`, and 3+ + # NOTE: integers as a `str`. In some cases, they may also use an empty value + # NOTE: which is normalized as `null` and turned into `None` in the Python-land. + # NOTE: Another known mistake is setting a minor part of the SemVer notation + # NOTE: skipping the "patch" bit like "1.0" which is assumed non-compliant even + # NOTE: after the conversion to string. 
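+                # NOTE: Illustrative (hypothetical) metadata values and how
+                # NOTE: PyYAML would load them, per the explanation above:
+                # NOTE:     version: 1.0    ->  float         -> rejected just below
+                # NOTE:     version: 1      ->  int           -> rejected just below
+                # NOTE:     version: "1.0"  ->  str, non-SemVer -> rejected a bit later
+                # NOTE:     version: 1.0.0  ->  str '1.0.0'   -> accepted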
+                if not isinstance(version, string_types):
+                    raise ValueError(version_err)
+                elif version != '*':
+                    try:
+                        SemanticVersion(version)
+                    except ValueError as ex:
+                        raise ValueError(version_err) from ex
+
+            return [
+                Candidate(fqcn, version, _none_src_server, first_req.type, None)
+                for version, _none_src_server in coll_versions
+            ]
+
+        latest_matches = []
+        signatures = []
+        extra_signature_sources = []  # type: list[str]
+        for version, src_server in coll_versions:
+            tmp_candidate = Candidate(fqcn, version, src_server, 'galaxy', None)
+
+            unsatisfied = False
+            for requirement in requirements:
+                unsatisfied |= not self.is_satisfied_by(requirement, tmp_candidate)
+                # FIXME
+                # unsatisfied |= not self.is_satisfied_by(requirement, tmp_candidate) or not (
+                #    requirement.src is None or  # if this is true for some candidates but not all it will break key param - Nonetype can't be compared to str
+                #    or requirement.src == candidate.src
+                # )
+                if unsatisfied:
+                    break
+                if not self._include_signatures:
+                    continue
+
+                extra_signature_sources.extend(requirement.signature_sources or [])
+
+            if not unsatisfied:
+                if self._include_signatures:
+                    signatures = src_server.get_collection_signatures(first_req.namespace, first_req.name, version)
+                    for extra_source in extra_signature_sources:
+                        signatures.append(get_signature_from_source(extra_source))
+                latest_matches.append(
+                    Candidate(fqcn, version, src_server, 'galaxy', frozenset(signatures))
+                )
+
+        latest_matches.sort(
+            key=lambda candidate: (
+                SemanticVersion(candidate.ver), candidate.src,
+            ),
+            reverse=True,  # prefer newer versions over older ones
+        )
+
+        if not preinstalled_candidates:
+            preinstalled_candidates = {
+                candidate for candidate in self._preferred_candidates
+                if candidate.fqcn == fqcn and
+                (
+                    # check if an upgrade is necessary
+                    all(self.is_satisfied_by(requirement, candidate) for requirement in requirements) and
+                    (
+                        not self._upgrade or
+                        # check if an upgrade is preferred
+                        all(SemanticVersion(latest.ver) <= SemanticVersion(candidate.ver) for latest in latest_matches)
+                    )
+                )
+            }
+
+        return list(preinstalled_candidates) + latest_matches
+
+    def is_satisfied_by(self, requirement, candidate):
+        # type: (Requirement, Candidate) -> bool
+        r"""Whether the given requirement is satisfiable by a candidate.
+
+        :param requirement: A requirement that produced the `candidate`.
+
+        :param candidate: A pinned candidate supposedly matching the \
+                          `requirement` specifier. It is guaranteed to \
+                          have been generated from the `requirement`.
+
+        :returns: Indication whether the `candidate` is a viable \
+                  solution to the `requirement`.
+        """
+        # NOTE: Only allow pre-release candidates if we want pre-releases
+        # NOTE: or the req ver was an exact match with the pre-release
+        # NOTE: version. Another case where we'd want to allow
+        # NOTE: pre-releases is when there are several user requirements
+        # NOTE: and one of them is a pre-release that also matches a
+        # NOTE: transitive dependency of another requirement.
+        allow_pre_release = self._with_pre_releases or not (
+            requirement.ver == '*' or
+            requirement.ver.startswith('<') or
+            requirement.ver.startswith('>') or
+            requirement.ver.startswith('!=')
+        ) or self._is_user_requested(candidate)
+        if is_pre_release(candidate.ver) and not allow_pre_release:
+            return False
+
+        # NOTE: This is a set of Pipenv-inspired optimizations. Ref:
+        # https://github.com/sarugaku/passa/blob/2ac00f1/src/passa/models/providers.py#L58-L74
+        if (
+                requirement.is_virtual or
+                candidate.is_virtual or
+                requirement.ver == '*'
+        ):
+            return True
+
+        return meets_requirements(
+            version=candidate.ver,
+            requirements=requirement.ver,
+        )
+
+    def get_dependencies(self, candidate):
+        # type: (Candidate) -> list[Candidate]
+        r"""Get direct dependencies of a candidate.
+
+        :returns: A collection of requirements that `candidate` \
+                  specifies as its dependencies.
+        """
+        # FIXME: If there's several galaxy servers set, there may be a
+        # FIXME: situation when the metadata of the same collection
+        # FIXME: differs. So how do we resolve this case? Priority?
+        # FIXME: Taking into account a pinned hash? Exploding on
+        # FIXME: any differences?
+        # NOTE: The underlying implementation currently uses first found
+        req_map = self._api_proxy.get_collection_dependencies(candidate)
+
+        # NOTE: This guard expression MUST perform an early exit only
+        # NOTE: after the `get_collection_dependencies()` call because
+        # NOTE: internally it populates the artifact URL of the candidate,
+        # NOTE: its SHA hash and the Galaxy API token. These are still
+        # NOTE: necessary with `--no-deps` because even with the disabled
+        # NOTE: dependency resolution the outer layer will still need to
+        # NOTE: know how to download and validate the artifact.
+        #
+        # NOTE: Virtual candidates should always return dependencies
+        # NOTE: because they are ephemeral and non-installable.
+        if not self._with_deps and not candidate.is_virtual:
+            return []
+
+        return [
+            self._make_req_from_dict({'name': dep_name, 'version': dep_req})
+            for dep_name, dep_req in req_map.items()
+        ]
+
+
+# Classes to handle resolvelib API changes between minor versions for 0.X
+class CollectionDependencyProvider050(CollectionDependencyProviderBase):
+    def find_matches(self, requirements):  # type: ignore[override]
+        # type: (list[Requirement]) -> list[Candidate]
+        return self._find_matches(requirements)
+
+    def get_preference(self, resolution, candidates, information):  # type: ignore[override]
+        # type: (t.Optional[Candidate], list[Candidate], list[t.NamedTuple]) -> t.Union[float, int]
+        return self._get_preference(candidates)
+
+
+class CollectionDependencyProvider060(CollectionDependencyProviderBase):
+    def find_matches(self, identifier, requirements, incompatibilities):  # type: ignore[override]
+        # type: (str, t.Mapping[str, t.Iterator[Requirement]], t.Mapping[str, t.Iterator[Requirement]]) -> list[Candidate]
+        return [
+            match for match in self._find_matches(list(requirements[identifier]))
+            if not any(match.ver == incompat.ver for incompat in incompatibilities[identifier])
+        ]
+
+    def get_preference(self, resolution, candidates, information):  # type: ignore[override]
+        # type: (t.Optional[Candidate], list[Candidate], list[t.NamedTuple]) -> t.Union[float, int]
+        return self._get_preference(candidates)
+
+
+class CollectionDependencyProvider070(CollectionDependencyProvider060):
+    def get_preference(self, identifier, resolutions, candidates, information):  # type: ignore[override]
+        # type: (str, t.Mapping[str, Candidate], t.Mapping[str, t.Iterator[Candidate]], t.Iterator[t.NamedTuple]) -> t.Union[float, int]
+        return self._get_preference(list(candidates[identifier]))
+
+
+class CollectionDependencyProvider080(CollectionDependencyProvider060):
+    def get_preference(self, identifier, resolutions, candidates, information, backtrack_causes):  # type: ignore[override]
+        # type: (str, t.Mapping[str, Candidate], t.Mapping[str, t.Iterator[Candidate]], t.Iterator[t.NamedTuple], t.Sequence) -> t.Union[float, int]
+        return self._get_preference(list(candidates[identifier]))
+
+
+def _get_provider():  # type: () -> CollectionDependencyProviderBase
+    if RESOLVELIB_VERSION >= SemanticVersion("0.8.0"):
+        return CollectionDependencyProvider080
+    if RESOLVELIB_VERSION >= SemanticVersion("0.7.0"):
+        return CollectionDependencyProvider070
+    if RESOLVELIB_VERSION >= SemanticVersion("0.6.0"):
+        return CollectionDependencyProvider060
+    return CollectionDependencyProvider050
+
+
+CollectionDependencyProvider = _get_provider()
diff --git a/lib/ansible/galaxy/dependency_resolution/reporters.py b/lib/ansible/galaxy/dependency_resolution/reporters.py
new file mode 100644
index 0000000..69908b2
--- /dev/null
+++ b/lib/ansible/galaxy/dependency_resolution/reporters.py
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020-2021, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""Requirement reporter implementations."""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+    from resolvelib import BaseReporter
+except ImportError:
+    class BaseReporter:  # type: ignore[no-redef]
+        pass
+
+
+class CollectionDependencyReporter(BaseReporter):
+    """A dependency reporter for Ansible Collections.
+
+    This is a proxy class allowing us to abstract away importing resolvelib
+    outside of the `ansible.galaxy.dependency_resolution` Python package.
+    """
diff --git a/lib/ansible/galaxy/dependency_resolution/resolvers.py b/lib/ansible/galaxy/dependency_resolution/resolvers.py
new file mode 100644
index 0000000..87ca38d
--- /dev/null
+++ b/lib/ansible/galaxy/dependency_resolution/resolvers.py
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020-2021, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""Requirement resolver implementations."""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+    from resolvelib import Resolver
+except ImportError:
+    class Resolver:  # type: ignore[no-redef]
+        pass
+
+
+class CollectionDependencyResolver(Resolver):
+    """A dependency resolver for Ansible Collections.
+
+    This is a proxy class allowing us to abstract away importing resolvelib
+    outside of the `ansible.galaxy.dependency_resolution` Python package.
+    """
diff --git a/lib/ansible/galaxy/dependency_resolution/versioning.py b/lib/ansible/galaxy/dependency_resolution/versioning.py
new file mode 100644
index 0000000..93adce4
--- /dev/null
+++ b/lib/ansible/galaxy/dependency_resolution/versioning.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2019-2020, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""Version comparison helpers."""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import operator
+
+from ansible.module_utils.compat.version import LooseVersion
+from ansible.utils.version import SemanticVersion
+
+
+def is_pre_release(version):
+    # type: (str) -> bool
+    """Figure out if a given version is a pre-release."""
+    try:
+        return SemanticVersion(version).is_prerelease
+    except ValueError:
+        return False
+
+
+def meets_requirements(version, requirements):
+    # type: (str, str) -> bool
+    """Verify if a given version satisfies all the requirements.
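+
+    For example (illustrative values): ``meets_requirements('1.5.0', '>=1.0.0,<2.0.0')``
+    returns ``True``, while ``meets_requirements('2.1.0', '<2.0.0')`` returns ``False``.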
diff --git a/lib/ansible/galaxy/dependency_resolution/resolvers.py b/lib/ansible/galaxy/dependency_resolution/resolvers.py
new file mode 100644
index 0000000..87ca38d
--- /dev/null
+++ b/lib/ansible/galaxy/dependency_resolution/resolvers.py
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020-2021, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""Requirement resolver implementations."""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+    from resolvelib import Resolver
+except ImportError:
+    class Resolver:  # type: ignore[no-redef]
+        pass
+
+
+class CollectionDependencyResolver(Resolver):
+    """A dependency resolver for Ansible Collections.
+
+    This is a proxy class allowing us to abstract away importing resolvelib
+    outside of the `ansible.galaxy.dependency_resolution` Python package.
+    """
diff --git a/lib/ansible/galaxy/dependency_resolution/versioning.py b/lib/ansible/galaxy/dependency_resolution/versioning.py
new file mode 100644
index 0000000..93adce4
--- /dev/null
+++ b/lib/ansible/galaxy/dependency_resolution/versioning.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2019-2020, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""Version comparison helpers."""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import operator
+
+from ansible.module_utils.compat.version import LooseVersion
+from ansible.utils.version import SemanticVersion
+
+
+def is_pre_release(version):
+    # type: (str) -> bool
+    """Figure out if a given version is a pre-release."""
+    try:
+        return SemanticVersion(version).is_prerelease
+    except ValueError:
+        return False
+
+
+def meets_requirements(version, requirements):
+    # type: (str, str) -> bool
+    """Verify if a given version satisfies all the requirements.
+
+    Supported version identifiers are:
+      * '=='
+      * '=' (alias of '==')
+      * '!='
+      * '>'
+      * '>='
+      * '<'
+      * '<='
+      * '*'
+
+    Each requirement is delimited by ','.
+    """
+    op_map = {
+        '!=': operator.ne,
+        '==': operator.eq,
+        '=': operator.eq,
+        '>=': operator.ge,
+        '>': operator.gt,
+        '<=': operator.le,
+        '<': operator.lt,
+    }
+
+    for req in requirements.split(','):
+        op_pos = 2 if len(req) > 1 and req[1] == '=' else 1
+        op = op_map.get(req[:op_pos])
+
+        requirement = req[op_pos:]
+        if not op:
+            requirement = req
+            op = operator.eq
+
+        if requirement == '*' or version == '*':
+            continue
+
+        if not op(
+                SemanticVersion(version),
+                SemanticVersion.from_loose_version(LooseVersion(requirement)),
+        ):
+            break
+    else:
+        return True
+
+    # The loop was broken early: the version does not meet all the requirements
+    return False
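The two helpers above are easiest to understand by example; this doctest-style sketch uses the module path added in this patch, with arbitrary version strings:

    from ansible.galaxy.dependency_resolution.versioning import (
        is_pre_release,
        meets_requirements,
    )

    assert meets_requirements('1.2.3', '>=1.0.0,!=1.2.0')      # both clauses hold
    assert not meets_requirements('1.2.0', '>=1.0.0,!=1.2.0')  # the '!=' clause fails
    assert meets_requirements('0.1.0', '*')                    # wildcard always matches
    assert is_pre_release('1.0.0-beta.1')
    assert not is_pre_release('not-a-semver')                  # unparsable: treated as stable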
diff --git a/lib/ansible/galaxy/role.py b/lib/ansible/galaxy/role.py
new file mode 100644
index 0000000..99bb525
--- /dev/null
+++ b/lib/ansible/galaxy/role.py
@@ -0,0 +1,439 @@
+########################################################################
+#
+# (C) 2015, Brian Coca <bcoca@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+########################################################################
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import errno
+import datetime
+import os
+import tarfile
+import tempfile
+
+from collections.abc import MutableSequence
+from shutil import rmtree
+
+from ansible import context
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.galaxy.api import GalaxyAPI
+from ansible.galaxy.user_agent import user_agent
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.common.yaml import yaml_dump, yaml_load
+from ansible.module_utils.compat.version import LooseVersion
+from ansible.module_utils.urls import open_url
+from ansible.playbook.role.requirement import RoleRequirement
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class GalaxyRole(object):
+
+    SUPPORTED_SCMS = set(['git', 'hg'])
+    META_MAIN = (os.path.join('meta', 'main.yml'), os.path.join('meta', 'main.yaml'))
+    META_INSTALL = os.path.join('meta', '.galaxy_install_info')
+    META_REQUIREMENTS = (os.path.join('meta', 'requirements.yml'), os.path.join('meta', 'requirements.yaml'))
+    ROLE_DIRS = ('defaults', 'files', 'handlers', 'meta', 'tasks', 'templates', 'vars', 'tests')
+
+    def __init__(self, galaxy, api, name, src=None, version=None, scm=None, path=None):
+
+        self._metadata = None
+        self._metadata_dependencies = None
+        self._requirements = None
+        self._install_info = None
+        self._validate_certs = not context.CLIARGS['ignore_certs']
+
+        display.debug('Validate TLS certificates: %s' % self._validate_certs)
+
+        self.galaxy = galaxy
+        self._api = api
+
+        self.name = name
+        self.version = version
+        self.src = src or name
+        self.download_url = None
+        self.scm = scm
+        self.paths = [os.path.join(x, self.name) for x in galaxy.roles_paths]
+
+        if path is not None:
+            if not path.endswith(os.path.join(os.path.sep, self.name)):
+                path = os.path.join(path, self.name)
+            else:
+                # Look for a meta/main.ya?ml inside the potential role dir in case
+                # the role name is the same as the parent directory of the role.
+                #
+                # Example:
+                #   ./roles/testing/testing/meta/main.yml
+                for meta_main in self.META_MAIN:
+                    if os.path.exists(os.path.join(path, name, meta_main)):
+                        path = os.path.join(path, self.name)
+                        break
+            self.path = path
+        else:
+            # use the first path by default
+            self.path = self.paths[0]
+
+    def __repr__(self):
+        """
+        Returns "rolename (version)" if version is set,
+        "rolename" otherwise
+        """
+        if self.version:
+            return "%s (%s)" % (self.name, self.version)
+        else:
+            return self.name
+
+    def __eq__(self, other):
+        return self.name == other.name
+
+    @property
+    def api(self):
+        if not isinstance(self._api, GalaxyAPI):
+            return self._api.api
+        return self._api
+
+    @property
+    def metadata(self):
+        """
+        Returns role metadata
+        """
+        if self._metadata is None:
+            for path in self.paths:
+                for meta_main in self.META_MAIN:
+                    meta_path = os.path.join(path, meta_main)
+                    if os.path.isfile(meta_path):
+                        try:
+                            with open(meta_path, 'r') as f:
+                                self._metadata = yaml_load(f)
+                        except Exception:
+                            display.vvvvv("Unable to load metadata for %s" % self.name)
+                            return False
+                        break
+
+        return self._metadata
+
+    @property
+    def metadata_dependencies(self):
+        """
+        Returns a list of dependencies from role metadata
+        """
+        if self._metadata_dependencies is None:
+            self._metadata_dependencies = []
+
+            if self.metadata is not None:
+                self._metadata_dependencies = self.metadata.get('dependencies') or []
+
+        if not isinstance(self._metadata_dependencies, MutableSequence):
+            raise AnsibleParserError(
+                f"Expected role dependencies to be a list. Role {self} has meta/main.yml with dependencies {self._metadata_dependencies}"
+            )
+
+        return self._metadata_dependencies
+
+    @property
+    def install_info(self):
+        """
+        Returns role install info
+        """
+        if self._install_info is None:
+
+            info_path = os.path.join(self.path, self.META_INSTALL)
+            if os.path.isfile(info_path):
+                try:
+                    with open(info_path, 'r') as f:
+                        self._install_info = yaml_load(f)
+                except Exception:
+                    display.vvvvv("Unable to load Galaxy install info for %s" % self.name)
+                    return False
+        return self._install_info
+
+    @property
+    def _exists(self):
+        for path in self.paths:
+            if os.path.isdir(path):
+                return True
+
+        return False
+
+    def _write_galaxy_install_info(self):
+        """
+        Writes a YAML-formatted file to the role's meta/ directory
+        (named .galaxy_install_info) which contains some information
+        we can use later for commands like 'list' and 'info'.
+        """
+
+        info = dict(
+            version=self.version,
+            install_date=datetime.datetime.utcnow().strftime("%c"),
+        )
+        if not os.path.exists(os.path.join(self.path, 'meta')):
+            os.makedirs(os.path.join(self.path, 'meta'))
+        info_path = os.path.join(self.path, self.META_INSTALL)
+        with open(info_path, 'w+') as f:
+            try:
+                self._install_info = yaml_dump(info, f)
+            except Exception:
+                return False
+
+        return True
+
+    def remove(self):
+        """
+        Removes the specified role from the roles path.
+        There is a sanity check to make sure there's a meta/main.yml file
+        at this path so the user doesn't blow away random directories.
+        """
+        if self.metadata:
+            try:
+                rmtree(self.path)
+                return True
+            except Exception:
+                pass
+
+        return False
+
+    def fetch(self, role_data):
+        """
+        Downloads the archived role to a temp location based on role data
+        """
+        if role_data:
+
+            # first grab the file and save it to a temp location
+            if self.download_url is not None:
+                archive_url = self.download_url
+            elif "github_user" in role_data and "github_repo" in role_data:
+                archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], self.version)
+            else:
+                archive_url = self.src
+
+            display.display("- downloading role from %s" % archive_url)
+
+            try:
+                url_file = open_url(archive_url, validate_certs=self._validate_certs, http_agent=user_agent())
+                temp_file = tempfile.NamedTemporaryFile(delete=False)
+                data = url_file.read()
+                while data:
+                    temp_file.write(data)
+                    data = url_file.read()
+                temp_file.close()
+                return temp_file.name
+            except Exception as e:
+                display.error(u"failed to download the file: %s" % to_text(e))
+
+        return False
+
+    def install(self):
+
+        if self.scm:
+            # create tar file from scm url
+            tmp_file = RoleRequirement.scm_archive_role(keep_scm_meta=context.CLIARGS['keep_scm_meta'], **self.spec)
+        elif self.src:
+            if os.path.isfile(self.src):
+                tmp_file = self.src
+            elif '://' in self.src:
+                role_data = self.src
+                tmp_file = self.fetch(role_data)
+            else:
+                role_data = self.api.lookup_role_by_name(self.src)
+                if not role_data:
+                    raise AnsibleError("- sorry, %s was not found on %s." % (self.src, self.api.api_server))
+
+                if role_data.get('role_type') == 'APP':
+                    # Container Role
+                    display.warning("%s is a Container App role, and should only be installed using Ansible "
+                                    "Container" % self.name)
+
+                role_versions = self.api.fetch_role_related('versions', role_data['id'])
+                if not self.version:
+                    # convert the version names to LooseVersion objects
+                    # and sort them to get the latest version. If there
+                    # are no versions in the list, we'll grab the head
+                    # of the master branch
+                    if len(role_versions) > 0:
+                        loose_versions = [LooseVersion(a.get('name', None)) for a in role_versions]
+                        try:
+                            loose_versions.sort()
+                        except TypeError:
+                            raise AnsibleError(
+                                'Unable to compare role versions (%s) to determine the most recent version due to incompatible version formats. '
+                                'Please contact the role author to resolve versioning conflicts, or specify an explicit role version to '
+                                'install.' % ', '.join([v.vstring for v in loose_versions])
+                            )
+                        self.version = to_text(loose_versions[-1])
+                    elif role_data.get('github_branch', None):
+                        self.version = role_data['github_branch']
+                    else:
+                        self.version = 'master'
+                elif self.version != 'master':
+                    if role_versions and to_text(self.version) not in [a.get('name', None) for a in role_versions]:
+                        raise AnsibleError("- the specified version (%s) of %s was not found in the list of available versions (%s)." % (self.version,
+                                                                                                                                         self.name,
+                                                                                                                                         role_versions))
+
+                # check if there's a source link/url for our role_version
+                for role_version in role_versions:
+                    if role_version['name'] == self.version and 'source' in role_version:
+                        self.src = role_version['source']
+                    if role_version['name'] == self.version and 'download_url' in role_version:
+                        self.download_url = role_version['download_url']
+
+                tmp_file = self.fetch(role_data)
+
+        else:
+            raise AnsibleError("No valid role data found")
+
+        if tmp_file:
+
+            display.debug("installing from %s" % tmp_file)
+
+            if not tarfile.is_tarfile(tmp_file):
+                raise AnsibleError("the downloaded file does not appear to be a valid tar archive.")
+            else:
+                role_tar_file = tarfile.open(tmp_file, "r")
+                # verify the role's meta file
+                meta_file = None
+                members = role_tar_file.getmembers()
+                # next find the metadata file
+                for member in members:
+                    for meta_main in self.META_MAIN:
+                        if meta_main in member.name:
+                            # Look for the parent of meta/main.yml; because sub-roles
+                            # may each contain a meta/main.yml too, keep the shortest
+                            # (i.e. top-most) parent directory found.
+                            meta_parent_dir = os.path.dirname(os.path.dirname(member.name))
+                            if not meta_file:
+                                archive_parent_dir = meta_parent_dir
+                                meta_file = member
+                            else:
+                                if len(meta_parent_dir) < len(archive_parent_dir):
+                                    archive_parent_dir = meta_parent_dir
+                                    meta_file = member
+                if not meta_file:
+                    raise AnsibleError("this role does not appear to have a meta/main.yml file.")
+                else:
+                    try:
+                        self._metadata = yaml_load(role_tar_file.extractfile(meta_file))
+                    except Exception:
+                        raise AnsibleError("this role does not appear to have a valid meta/main.yml file.")
+
+                paths = self.paths
+                if self.path != paths[0]:
+                    # path can be passed through __init__
+                    # FIXME should this be done in __init__?
+                    paths[:0] = [self.path]
+                paths_len = len(paths)
+                for idx, path in enumerate(paths):
+                    self.path = path
+                    display.display("- extracting %s to %s" % (self.name, self.path))
+                    try:
+                        if os.path.exists(self.path):
+                            if not os.path.isdir(self.path):
+                                raise AnsibleError("the specified roles path exists and is not a directory.")
+                            elif not context.CLIARGS.get("force", False):
+                                raise AnsibleError("the specified role %s appears to already exist. Use --force to replace it." % self.name)
+                            else:
+                                # using --force, remove the old path
+                                if not self.remove():
+                                    raise AnsibleError("%s doesn't appear to contain a role.\n  please remove this directory manually if you really "
+                                                       "want to put the role here." % self.path)
+                        else:
+                            os.makedirs(self.path)
+
+                        # We strip off any higher-level directories for all of the files
+                        # contained within the tar file here. The default is 'github_repo-target'.
+                        # Gerrit instances, on the other hand, do not have a parent directory at all.
+                        for member in members:
+                            # we only extract files, and remove any relative path
+                            # bits that might be in the file for security purposes
+                            # and drop any containing directory, as mentioned above
+                            if member.isreg() or member.issym():
+                                n_member_name = to_native(member.name)
+                                n_archive_parent_dir = to_native(archive_parent_dir)
+                                n_parts = n_member_name.replace(n_archive_parent_dir, "", 1).split(os.sep)
+                                n_final_parts = []
+                                for n_part in n_parts:
+                                    # TODO if the condition triggers it produces a broken installation.
+                                    # It will create the parent directory as an empty file and will
+                                    # explode if the directory contains valid files.
+                                    # Leaving this as is since the whole module needs a rewrite.
+                                    if n_part != '..' and not n_part.startswith('~') and '$' not in n_part:
+                                        n_final_parts.append(n_part)
+                                member.name = os.path.join(*n_final_parts)
+                                role_tar_file.extract(member, to_native(self.path))
+
+                        # write out the install info file for later use
+                        self._write_galaxy_install_info()
+                        break
+                    except OSError as e:
+                        if e.errno == errno.EACCES and idx < paths_len - 1:
+                            continue
+                        raise AnsibleError("Could not update files in %s: %s" % (self.path, to_native(e)))
+
+                # return the parsed yaml metadata
+                display.display("- %s was installed successfully" % str(self))
+                if not (self.src and os.path.isfile(self.src)):
+                    try:
+                        os.unlink(tmp_file)
+                    except (OSError, IOError) as e:
+                        display.warning(u"Unable to remove tmp file (%s): %s" % (tmp_file, to_text(e)))
+                return True
+
+        return False
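The member-name filtering at the heart of the extraction loop above is what guards against path traversal out of the roles directory. Condensed into a standalone helper for clarity (illustrative only, not the exact Ansible code; this version additionally drops empty path segments):

    import os


    def sanitize_member_name(member_name, archive_parent_dir):
        """Strip the archive's parent dir and any traversal-prone path parts."""
        parts = member_name.replace(archive_parent_dir, '', 1).split(os.sep)
        kept = [p for p in parts
                if p and p != '..' and not p.startswith('~') and '$' not in p]
        return os.path.join(*kept) if kept else None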
+
+    @property
+    def spec(self):
+        """
+        Returns role spec info
+        {
+            'scm': 'git',
+            'src': 'http://git.example.com/repos/repo.git',
+            'version': 'v1.0',
+            'name': 'repo'
+        }
+        """
+        return dict(scm=self.scm, src=self.src, version=self.version, name=self.name)
+
+    @property
+    def requirements(self):
+        """
+        Returns role requirements
+        """
+        if self._requirements is None:
+            self._requirements = []
+            for meta_requirements in self.META_REQUIREMENTS:
+                meta_path = os.path.join(self.path, meta_requirements)
+                if os.path.isfile(meta_path):
+                    try:
+                        with open(meta_path, 'r') as f:
+                            self._requirements = yaml_load(f)
+                    except Exception:
+                        display.vvvvv("Unable to load requirements for %s" % self.name)
+
+                    break
+
+        if not isinstance(self._requirements, MutableSequence):
+            raise AnsibleParserError(f"Expected role requirements to be a list. Role {self} has meta/requirements.yml containing {self._requirements}")
+
+        return self._requirements
diff --git a/lib/ansible/galaxy/token.py b/lib/ansible/galaxy/token.py
new file mode 100644
index 0000000..4455fd0
--- /dev/null
+++ b/lib/ansible/galaxy/token.py
@@ -0,0 +1,187 @@
+########################################################################
+#
+# (C) 2015, Chris Houseknecht <chouse@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+########################################################################
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import base64
+import os
+import json
+from stat import S_IRUSR, S_IWUSR
+
+from ansible import constants as C
+from ansible.galaxy.user_agent import user_agent
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.common.yaml import yaml_dump, yaml_load
+from ansible.module_utils.urls import open_url
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class NoTokenSentinel(object):
+    """Represents an ansible.cfg server with no token defined (will ignore the cmdline and GALAXY_TOKEN_PATH)."""
+    def __new__(cls, *args, **kwargs):
+        return cls
+
+
+class KeycloakToken(object):
+    '''A token granted by a Keycloak server.
+
+    For example sso.redhat.com, as used by cloud.redhat.com
+    (i.e. Automation Hub).'''
+
+    token_type = 'Bearer'
+
+    def __init__(self, access_token=None, auth_url=None, validate_certs=True, client_id=None):
+        self.access_token = access_token
+        self.auth_url = auth_url
+        self._token = None
+        self.validate_certs = validate_certs
+        self.client_id = client_id
+        if self.client_id is None:
+            self.client_id = 'cloud-services'
+
+    def _form_payload(self):
+        return 'grant_type=refresh_token&client_id=%s&refresh_token=%s' % (self.client_id,
+                                                                           self.access_token)
+
+    def get(self):
+        if self._token:
+            return self._token
+
+        # - build a request to POST to auth_url
+        #  - body is form encoded
+        #  - 'refresh_token' is the offline token stored in ansible.cfg
+        #  - 'grant_type' is 'refresh_token'
+        #  - 'client_id' is 'cloud-services'
+        #       - should probably be based on the contents of the
+        #         offline token's JWT payload 'aud' (audience)
+        #         or 'azp' (authorized party - the party to which the ID Token was issued)
+        payload = self._form_payload()
+
+        resp = open_url(to_native(self.auth_url),
+                        data=payload,
+                        validate_certs=self.validate_certs,
+                        method='POST',
+                        http_agent=user_agent())
+
+        # TODO: handle auth errors
+
+        data = json.loads(to_text(resp.read(), errors='surrogate_or_strict'))
+
+        # - extract 'access_token'
+        self._token = data.get('access_token')
+
+        return self._token
+
+    def headers(self):
+        headers = {}
+        headers['Authorization'] = '%s %s' % (self.token_type, self.get())
+        return headers
+
+
+class GalaxyToken(object):
+    ''' Class for storing and retrieving a local galaxy token '''
+
+    token_type = 'Token'
+
+    def __init__(self, token=None):
+        self.b_file = to_bytes(C.GALAXY_TOKEN_PATH, errors='surrogate_or_strict')
+        # Done so the config file is only opened when set/get/save is called
+        self._config = None
+        self._token = token
+
+    @property
+    def config(self):
+        if self._config is None:
+            self._config = self._read()
+
+        # Prioritise the token passed into the constructor
+        if self._token:
+            self._config['token'] = None if self._token is NoTokenSentinel else self._token
+
+        return self._config
+
+    def _read(self):
+        action = 'Opened'
+        if not os.path.isfile(self.b_file):
+            # token file not found, create and chmod u+rw
+            open(self.b_file, 'w').close()
+            os.chmod(self.b_file, S_IRUSR | S_IWUSR)  # owner has +rw
+            action = 'Created'
+
+        with open(self.b_file, 'r') as f:
+            config = yaml_load(f)
+
+        display.vvv('%s %s' % (action, to_text(self.b_file)))
+
+        if config and not isinstance(config, dict):
+            display.vvv('Galaxy token file %s malformed, unable to read it' % to_text(self.b_file))
+            return {}
+
+        return config or {}
+
+    def set(self, token):
+        self._token = token
+        self.save()
+
+    def get(self):
+        return self.config.get('token', None)
+
+    def save(self):
+        with open(self.b_file, 'w') as f:
+            yaml_dump(self.config, f, default_flow_style=False)
+
+    def headers(self):
+        headers = {}
+        token = self.get()
+        if token:
+            headers['Authorization'] = '%s %s' % (self.token_type, token)
+        return headers
+
+
+class BasicAuthToken(object):
+    token_type = 'Basic'
+
+    def __init__(self, username, password=None):
+        self.username = username
+        self.password = password
+        self._token = None
+
+    @staticmethod
+    def _encode_token(username, password):
+        token = "%s:%s" % (to_text(username, errors='surrogate_or_strict'),
+                           to_text(password, errors='surrogate_or_strict', nonstring='passthru') or '')
+        b64_val = base64.b64encode(to_bytes(token, encoding='utf-8', errors='surrogate_or_strict'))
+        return to_text(b64_val)
+
+    def get(self):
+        if self._token:
+            return self._token
+
+        self._token = self._encode_token(self.username, self.password)
+
+        return self._token
+
+    def headers(self):
+        headers = {}
+        headers['Authorization'] = '%s %s' % (self.token_type, self.get())
+        return headers
diff --git a/lib/ansible/galaxy/user_agent.py b/lib/ansible/galaxy/user_agent.py
new file mode 100644
index 0000000..c860bcd
--- /dev/null
+++ b/lib/ansible/galaxy/user_agent.py
@@ -0,0 +1,23 @@
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import platform
+import sys
+
+from ansible.module_utils.ansible_release import __version__ as ansible_version
+
+
+def user_agent():
+    """Return a user agent string for ansible-galaxy that includes the Ansible version, platform and Python version."""
+
+    python_version = sys.version_info
+    return u"ansible-galaxy/{ansible_version} ({platform}; python:{py_major}.{py_minor}.{py_micro})".format(
+        ansible_version=ansible_version,
+        platform=platform.system(),
+        py_major=python_version.major,
+        py_minor=python_version.minor,
+        py_micro=python_version.micro,
+    )
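For reference, the string `user_agent()` builds looks like the following; the version numbers here are examples only:

    from ansible.galaxy.user_agent import user_agent

    print(user_agent())
    # e.g. ansible-galaxy/2.14.0 (Linux; python:3.10.12)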