author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-13 12:04:41 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-13 12:04:41 +0000
commit    975f66f2eebe9dadba04f275774d4ab83f74cf25 (patch)
tree      89bd26a93aaae6a25749145b7e4bca4a1e75b2be /ansible_collections/community/okd/plugins
parent    Initial commit. (diff)
download  ansible-975f66f2eebe9dadba04f275774d4ab83f74cf25.tar.xz
          ansible-975f66f2eebe9dadba04f275774d4ab83f74cf25.zip

Adding upstream version 7.7.0+dfsg. (upstream/7.7.0+dfsg)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'ansible_collections/community/okd/plugins')
-rw-r--r--  ansible_collections/community/okd/plugins/connection/oc.py  173
-rw-r--r--  ansible_collections/community/okd/plugins/inventory/openshift.py  215
-rw-r--r--  ansible_collections/community/okd/plugins/module_utils/k8s.py  220
-rw-r--r--  ansible_collections/community/okd/plugins/module_utils/openshift_adm_prune_auth.py  309
-rw-r--r--  ansible_collections/community/okd/plugins/module_utils/openshift_adm_prune_deployments.py  122
-rw-r--r--  ansible_collections/community/okd/plugins/module_utils/openshift_adm_prune_images.py  477
-rw-r--r--  ansible_collections/community/okd/plugins/module_utils/openshift_builds.py  409
-rw-r--r--  ansible_collections/community/okd/plugins/module_utils/openshift_common.py  93
-rw-r--r--  ansible_collections/community/okd/plugins/module_utils/openshift_docker_image.py  103
-rw-r--r--  ansible_collections/community/okd/plugins/module_utils/openshift_groups.py  442
-rw-r--r--  ansible_collections/community/okd/plugins/module_utils/openshift_images_common.py  226
-rw-r--r--  ansible_collections/community/okd/plugins/module_utils/openshift_import_image.py  390
-rw-r--r--  ansible_collections/community/okd/plugins/module_utils/openshift_ldap.py  777
-rw-r--r--  ansible_collections/community/okd/plugins/module_utils/openshift_process.py  199
-rw-r--r--  ansible_collections/community/okd/plugins/module_utils/openshift_registry.py  144
-rw-r--r--  ansible_collections/community/okd/plugins/modules/k8s.py  308
-rw-r--r--  ansible_collections/community/okd/plugins/modules/openshift_adm_groups_sync.py  224
-rw-r--r--  ansible_collections/community/okd/plugins/modules/openshift_adm_migrate_template_instances.py  371
-rw-r--r--  ansible_collections/community/okd/plugins/modules/openshift_adm_prune_auth.py  132
-rw-r--r--  ansible_collections/community/okd/plugins/modules/openshift_adm_prune_builds.py  124
-rw-r--r--  ansible_collections/community/okd/plugins/modules/openshift_adm_prune_deployments.py  100
-rw-r--r--  ansible_collections/community/okd/plugins/modules/openshift_adm_prune_images.py  315
-rw-r--r--  ansible_collections/community/okd/plugins/modules/openshift_auth.py  392
-rw-r--r--  ansible_collections/community/okd/plugins/modules/openshift_build.py  260
-rw-r--r--  ansible_collections/community/okd/plugins/modules/openshift_import_image.py  194
-rw-r--r--  ansible_collections/community/okd/plugins/modules/openshift_process.py  236
-rw-r--r--  ansible_collections/community/okd/plugins/modules/openshift_registry_info.py  114
-rw-r--r--  ansible_collections/community/okd/plugins/modules/openshift_route.py  542
28 files changed, 7611 insertions, 0 deletions
diff --git a/ansible_collections/community/okd/plugins/connection/oc.py b/ansible_collections/community/okd/plugins/connection/oc.py
new file mode 100644
index 000000000..44236a11a
--- /dev/null
+++ b/ansible_collections/community/okd/plugins/connection/oc.py
@@ -0,0 +1,173 @@
+# Based on the docker connection plugin
+#
+# Connection plugin for running tasks in containers on OpenShift with oc
+# (c) 2017, XuXinkun <xuxinkun@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author:
+ - xuxinkun (@xuxinkun)
+
+ name: oc
+
+ short_description: Execute tasks in pods running on OpenShift.
+
+ description:
+ - Use the oc exec command to run tasks in, or put/fetch files to, pods running on the OpenShift
+ container platform.
+
+
+ requirements:
+ - oc (go binary)
+
+ options:
+ oc_pod:
+ description:
+ - Pod name. Required when the host name does not match the pod name.
+ default: ''
+ vars:
+ - name: ansible_oc_pod
+ env:
+ - name: K8S_AUTH_POD
+ oc_container:
+ description:
+ - Container name. Required when a pod contains more than one container.
+ default: ''
+ vars:
+ - name: ansible_oc_container
+ env:
+ - name: K8S_AUTH_CONTAINER
+ oc_namespace:
+ description:
+ - The namespace of the pod.
+ default: ''
+ vars:
+ - name: ansible_oc_namespace
+ env:
+ - name: K8S_AUTH_NAMESPACE
+ oc_extra_args:
+ description:
+ - Extra arguments to pass to the oc command line.
+ default: ''
+ vars:
+ - name: ansible_oc_extra_args
+ env:
+ - name: K8S_AUTH_EXTRA_ARGS
+ oc_kubeconfig:
+ description:
+ - Path to an oc config file. Defaults to I(~/.kube/config).
+ default: ''
+ vars:
+ - name: ansible_oc_kubeconfig
+ - name: ansible_oc_config
+ env:
+ - name: K8S_AUTH_KUBECONFIG
+ oc_context:
+ description:
+ - The name of a context found in the K8s config file.
+ default: ''
+ vars:
+ - name: ansible_oc_context
+ env:
+ - name: K8S_AUTH_CONTEXT
+ oc_host:
+ description:
+ - URL for accessing the API.
+ default: ''
+ vars:
+ - name: ansible_oc_host
+ - name: ansible_oc_server
+ env:
+ - name: K8S_AUTH_HOST
+ - name: K8S_AUTH_SERVER
+ oc_token:
+ description:
+ - API authentication bearer token.
+ vars:
+ - name: ansible_oc_token
+ - name: ansible_oc_api_key
+ env:
+ - name: K8S_AUTH_TOKEN
+ - name: K8S_AUTH_API_KEY
+ client_cert:
+ description:
+ - Path to a certificate used to authenticate with the API.
+ default: ''
+ vars:
+ - name: ansible_oc_cert_file
+ - name: ansible_oc_client_cert
+ env:
+ - name: K8S_AUTH_CERT_FILE
+ aliases: [ oc_cert_file ]
+ client_key:
+ description:
+ - Path to a key file used to authenticate with the API.
+ default: ''
+ vars:
+ - name: ansible_oc_key_file
+ - name: ansible_oc_client_key
+ env:
+ - name: K8S_AUTH_KEY_FILE
+ aliases: [ oc_key_file ]
+ ca_cert:
+ description:
+ - Path to a CA certificate used to authenticate with the API.
+ default: ''
+ vars:
+ - name: ansible_oc_ssl_ca_cert
+ - name: ansible_oc_ca_cert
+ env:
+ - name: K8S_AUTH_SSL_CA_CERT
+ aliases: [ oc_ssl_ca_cert ]
+ validate_certs:
+ description:
+ - Whether or not to verify the API server's SSL certificate. Defaults to I(true).
+ default: ''
+ vars:
+ - name: ansible_oc_verify_ssl
+ - name: ansible_oc_validate_certs
+ env:
+ - name: K8S_AUTH_VERIFY_SSL
+ aliases: [ oc_verify_ssl ]
+'''
+
+from ansible_collections.kubernetes.core.plugins.connection.kubectl import Connection as KubectlConnection
+
+
+CONNECTION_TRANSPORT = 'oc'
+
+CONNECTION_OPTIONS = {
+ 'oc_container': '-c',
+ 'oc_namespace': '-n',
+ 'oc_kubeconfig': '--kubeconfig',
+ 'oc_context': '--context',
+ 'oc_host': '--server',
+ 'client_cert': '--client-certificate',
+ 'client_key': '--client-key',
+ 'ca_cert': '--certificate-authority',
+ 'validate_certs': '--insecure-skip-tls-verify',
+ 'oc_token': '--token'
+}
+
+
+class Connection(KubectlConnection):
+ ''' Local oc based connections '''
+ transport = CONNECTION_TRANSPORT
+ connection_options = CONNECTION_OPTIONS
+ documentation = DOCUMENTATION
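
For orientation, a minimal standalone sketch of how an option map like CONNECTION_OPTIONS above is typically rendered into oc command-line flags. The option values below are hypothetical, and the real assembly happens in the inherited kubernetes.core kubectl Connection plugin, not in this snippet:

    # Hypothetical values; illustrates the option-name -> CLI-flag mapping only.
    connection_options = {
        'oc_namespace': '-n',
        'oc_context': '--context',
        'oc_kubeconfig': '--kubeconfig',
    }
    option_values = {'oc_namespace': 'testing', 'oc_context': 'dev', 'oc_kubeconfig': ''}

    cmd = ['oc']
    for opt, flag in connection_options.items():
        value = option_values.get(opt)
        if value:  # empty-string defaults are skipped
            cmd.extend([flag, value])
    cmd += ['exec', '-i', 'my-pod', '--', '/bin/sh', '-c', 'echo hello']
    print(' '.join(cmd))
    # oc -n testing --context dev exec -i my-pod -- /bin/sh -c echo hello
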
diff --git a/ansible_collections/community/okd/plugins/inventory/openshift.py b/ansible_collections/community/okd/plugins/inventory/openshift.py
new file mode 100644
index 000000000..f69c652fc
--- /dev/null
+++ b/ansible_collections/community/okd/plugins/inventory/openshift.py
@@ -0,0 +1,215 @@
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: openshift
+ author:
+ - Chris Houseknecht (@chouseknecht)
+
+ short_description: OpenShift inventory source
+
+ description:
+ - Fetch containers, services, and routes for one or more clusters.
+ - Groups by cluster name, namespace, namespace_services, namespace_pods, namespace_routes, and labels.
+ - Uses openshift.(yml|yaml) YAML configuration file to set parameter values.
+
+ options:
+ plugin:
+ description: token that ensures this is a source file for the 'openshift' plugin.
+ required: True
+ choices: ['openshift', 'community.okd.openshift']
+ connections:
+ description:
+ - Optional list of cluster connection settings. If no connections are provided, the default
+ I(~/.kube/config) and active context will be used, and objects will be returned for all namespaces
+ the active user is authorized to access.
+ suboptions:
+ name:
+ description:
+ - Optional name to assign to the cluster. If not provided, a name is constructed from the server
+ and port.
+ kubeconfig:
+ description:
+ - Path to an existing Kubernetes config file. If not provided, and no other connection
+ options are provided, the Kubernetes client will attempt to load the default
+ configuration file from I(~/.kube/config). Can also be specified via K8S_AUTH_KUBECONFIG
+ environment variable.
+ context:
+ description:
+ - The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment
+ variable.
+ host:
+ description:
+ - Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable.
+ api_key:
+ description:
+ - Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment
+ variable.
+ username:
+ description:
+ - Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME
+ environment variable.
+ password:
+ description:
+ - Provide a password for authenticating with the API. Can also be specified via K8S_AUTH_PASSWORD
+ environment variable.
+ client_cert:
+ description:
+ - Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE
+ environment variable.
+ aliases: [ cert_file ]
+ client_key:
+ description:
+ - Path to a key file used to authenticate with the API. Can also be specified via K8S_AUTH_KEY_FILE
+ environment variable.
+ aliases: [ key_file ]
+ ca_cert:
+ description:
+ - Path to a CA certificate used to authenticate with the API. Can also be specified via
+ K8S_AUTH_SSL_CA_CERT environment variable.
+ aliases: [ ssl_ca_cert ]
+ validate_certs:
+ description:
+ - "Whether or not to verify the API server's SSL certificates. Can also be specified via
+ K8S_AUTH_VERIFY_SSL environment variable."
+ type: bool
+ aliases: [ verify_ssl ]
+ namespaces:
+ description:
+ - List of namespaces. If not specified, will fetch all containers for all namespaces the user is authorized to access.
+ to access.
+
+ requirements:
+ - "python >= 3.6"
+ - "kubernetes >= 12.0.0"
+ - "PyYAML >= 3.11"
+'''
+
+EXAMPLES = '''
+# File must be named openshift.yaml or openshift.yml
+
+# Authenticate with token, and return all pods and services for all namespaces
+plugin: community.okd.openshift
+connections:
+ - host: https://192.168.64.4:8443
+ api_key: xxxxxxxxxxxxxxxx
+ verify_ssl: false
+
+# Use default config (~/.kube/config) file and active context, and return objects for a specific namespace
+plugin: community.okd.openshift
+connections:
+ - namespaces:
+ - testing
+
+# Use a custom config file, and a specific context.
+plugin: community.okd.openshift
+connections:
+ - kubeconfig: /path/to/config
+ context: 'awx/192-168-64-4:8443/developer'
+'''
+
+try:
+ from ansible_collections.kubernetes.core.plugins.inventory.k8s import K8sInventoryException, InventoryModule as K8sInventoryModule, format_dynamic_api_exc
+ from ansible_collections.kubernetes.core.plugins.module_utils.k8s.client import get_api_client
+ HAS_KUBERNETES_COLLECTION = True
+except ImportError as e:
+ HAS_KUBERNETES_COLLECTION = False
+
+
+try:
+ from kubernetes.dynamic.exceptions import DynamicApiError
+except ImportError:
+ pass
+
+
+class InventoryModule(K8sInventoryModule):
+ NAME = 'community.okd.openshift'
+
+ connection_plugin = 'community.okd.oc'
+ transport = 'oc'
+
+ def check_kubernetes_collection(self):
+
+ if not HAS_KUBERNETES_COLLECTION:
+ raise K8sInventoryException("The kubernetes.core collection must be installed")
+
+ def fetch_objects(self, connections):
+ self.check_kubernetes_collection()
+ super(InventoryModule, self).fetch_objects(connections)
+
+ if connections:
+ if not isinstance(connections, list):
+ raise K8sInventoryException("Expecting connections to be a list.")
+
+ for connection in connections:
+ client = get_api_client(**connection)
+ name = connection.get('name', self.get_default_host_name(client.configuration.host))
+ if connection.get('namespaces'):
+ namespaces = connection['namespaces']
+ else:
+ namespaces = self.get_available_namespaces(client)
+ for namespace in namespaces:
+ self.get_routes_for_namespace(client, name, namespace)
+ else:
+ client = get_api_client()
+ name = self.get_default_host_name(client.configuration.host)
+ namespaces = self.get_available_namespaces(client)
+ for namespace in namespaces:
+ self.get_routes_for_namespace(client, name, namespace)
+
+ def get_routes_for_namespace(self, client, name, namespace):
+ self.check_kubernetes_collection()
+ v1_route = client.resources.get(api_version='route.openshift.io/v1', kind='Route')
+ try:
+ obj = v1_route.get(namespace=namespace)
+ except DynamicApiError as exc:
+ self.display.debug(exc)
+ raise K8sInventoryException('Error fetching Routes list: %s' % format_dynamic_api_exc(exc))
+
+ namespace_group = 'namespace_{0}'.format(namespace)
+ namespace_routes_group = '{0}_routes'.format(namespace_group)
+
+ self.inventory.add_group(name)
+ self.inventory.add_group(namespace_group)
+ self.inventory.add_child(name, namespace_group)
+ self.inventory.add_group(namespace_routes_group)
+ self.inventory.add_child(namespace_group, namespace_routes_group)
+ for route in obj.items:
+ route_name = route.metadata.name
+ route_annotations = {} if not route.metadata.annotations else dict(route.metadata.annotations)
+
+ self.inventory.add_host(route_name)
+
+ if route.metadata.labels:
+ # create a group for each label_value
+ for key, value in route.metadata.labels:
+ group_name = 'label_{0}_{1}'.format(key, value)
+ self.inventory.add_group(group_name)
+ self.inventory.add_child(group_name, route_name)
+ route_labels = dict(route.metadata.labels)
+ else:
+ route_labels = {}
+
+ self.inventory.add_child(namespace_routes_group, route_name)
+
+ # add hostvars
+ self.inventory.set_variable(route_name, 'labels', route_labels)
+ self.inventory.set_variable(route_name, 'annotations', route_annotations)
+ self.inventory.set_variable(route_name, 'cluster_name', route.metadata.clusterName)
+ self.inventory.set_variable(route_name, 'object_type', 'route')
+ self.inventory.set_variable(route_name, 'self_link', route.metadata.selfLink)
+ self.inventory.set_variable(route_name, 'resource_version', route.metadata.resourceVersion)
+ self.inventory.set_variable(route_name, 'uid', route.metadata.uid)
+
+ if route.spec.host:
+ self.inventory.set_variable(route_name, 'host', route.spec.host)
+
+ if route.spec.path:
+ self.inventory.set_variable(route_name, 'path', route.spec.path)
+
+ if hasattr(route.spec.port, 'targetPort') and route.spec.port.targetPort:
+ self.inventory.set_variable(route_name, 'port', dict(route.spec.port))
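
To make the grouping above concrete, a sketch of the inventory shape this plugin produces for a single hypothetical Route named 'frontend' in namespace 'testing'. The cluster group name, host name, and hostvars values are illustrative:

    # Hypothetical inventory layout; group and host names are illustrative.
    inventory_sketch = {
        "cluster-example": {"children": ["namespace_testing"]},
        "namespace_testing": {"children": ["namespace_testing_routes"]},
        "namespace_testing_routes": {"hosts": ["frontend"]},
        "label_app_frontend": {"hosts": ["frontend"]},  # one group per label key/value pair
        "_meta": {"hostvars": {"frontend": {
            "labels": {"app": "frontend"},
            "annotations": {},
            "object_type": "route",
            "host": "frontend-testing.apps.example.com",
        }}},
    }
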
diff --git a/ansible_collections/community/okd/plugins/module_utils/k8s.py b/ansible_collections/community/okd/plugins/module_utils/k8s.py
new file mode 100644
index 000000000..87ec70d90
--- /dev/null
+++ b/ansible_collections/community/okd/plugins/module_utils/k8s.py
@@ -0,0 +1,220 @@
+#!/usr/bin/env python
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import operator
+from functools import reduce
+from ansible_collections.community.okd.plugins.module_utils.openshift_common import AnsibleOpenshiftModule
+
+try:
+ from ansible_collections.kubernetes.core.plugins.module_utils.k8s.resource import create_definitions
+ from ansible_collections.kubernetes.core.plugins.module_utils.k8s.exceptions import CoreException
+except ImportError:
+ pass
+
+from ansible.module_utils._text import to_native
+
+try:
+ from kubernetes.dynamic.exceptions import DynamicApiError, NotFoundError, ForbiddenError
+except ImportError as e:
+ pass
+
+
+TRIGGER_ANNOTATION = 'image.openshift.io/triggers'
+TRIGGER_CONTAINER = re.compile(r"(?P<path>.*)\[((?P<index>[0-9]+)|\?\(@\.name==[\"'\\]*(?P<name>[a-z0-9]([-a-z0-9]*[a-z0-9])?))")
+
+
+class OKDRawModule(AnsibleOpenshiftModule):
+
+ def __init__(self, **kwargs):
+
+ super(OKDRawModule, self).__init__(**kwargs)
+
+ @property
+ def module(self):
+ return self._module
+
+ def execute_module(self):
+ results = []
+ changed = False
+
+ try:
+ definitions = create_definitions(self.params)
+ except Exception as e:
+ msg = "Failed to load resource definition: {0}".format(e)
+ raise CoreException(msg) from e
+
+ for definition in definitions:
+ result = {"changed": False, "result": {}}
+ warnings = []
+
+ if self.params.get("state") != 'absent':
+ existing = None
+ name = definition.get("metadata", {}).get("name")
+ namespace = definition.get("metadata", {}).get("namespace")
+ if definition.get("kind") in ['Project', 'ProjectRequest']:
+ try:
+ resource = self.svc.find_resource(kind=definition.get("kind"), api_version=definition.get("apiVersion", "v1"))
+ existing = resource.get(name=name, namespace=namespace).to_dict()
+ except (NotFoundError, ForbiddenError):
+ result = self.create_project_request(definition)
+ changed |= result["changed"]
+ results.append(result)
+ continue
+ except DynamicApiError as exc:
+ self.fail_json(msg='Failed to retrieve requested object: {0}'.format(exc.body),
+ error=exc.status, status=exc.status, reason=exc.reason)
+
+ if definition.get("kind") not in ['Project', 'ProjectRequest']:
+ try:
+ resource = self.svc.find_resource(kind=definition.get("kind"), api_version=definition.get("apiVersion", "v1"))
+ existing = resource.get(name=name, namespace=namespace).to_dict()
+ except Exception:
+ existing = None
+
+ if existing:
+ if resource.kind == 'DeploymentConfig':
+ if definition.get('spec', {}).get('triggers'):
+ definition = self.resolve_imagestream_triggers(existing, definition)
+ elif existing['metadata'].get('annotations', {}).get(TRIGGER_ANNOTATION):
+ definition = self.resolve_imagestream_trigger_annotation(existing, definition)
+
+ if self.params.get("validate") is not None:
+ warnings = self.validate(definition)
+
+ try:
+ result = self.perform_action(definition, self.params)
+ except Exception as e:
+ try:
+ error = e.result
+ except AttributeError:
+ error = {}
+ try:
+ error["reason"] = e.__cause__.reason
+ except AttributeError:
+ pass
+ error["msg"] = to_native(e)
+ if warnings:
+ error.setdefault("warnings", []).extend(warnings)
+
+ if self.params.get("continue_on_error"):
+ result["error"] = error
+ else:
+ self.fail_json(**error)
+
+ if warnings:
+ result.setdefault("warnings", []).extend(warnings)
+ changed |= result["changed"]
+ results.append(result)
+
+ if len(results) == 1:
+ self.exit_json(**results[0])
+
+ self.exit_json(**{"changed": changed, "result": {"results": results}})
+
+ @staticmethod
+ def get_index(desired, objects, keys):
+ """ Returns the index of the first object in objects whose value for every
+ key in keys matches the corresponding value in desired
+ """
+ # pylint: disable=use-a-generator
+ # pylint suggests a generator instead: all(desired.get(key, True) == item.get(key, False) for key in keys)
+ for i, item in enumerate(objects):
+ if item and all([desired.get(key, True) == item.get(key, False) for key in keys]):
+ return i
+
+ def resolve_imagestream_trigger_annotation(self, existing, definition):
+ import yaml
+
+ def get_from_fields(d, fields):
+ try:
+ return reduce(operator.getitem, fields, d)
+ except Exception:
+ return None
+
+ def set_from_fields(d, fields, value):
+ get_from_fields(d, fields[:-1])[fields[-1]] = value
+
+ if TRIGGER_ANNOTATION in definition['metadata'].get('annotations', {}).keys():
+ triggers = yaml.safe_load(definition['metadata']['annotations'][TRIGGER_ANNOTATION] or '[]')
+ else:
+ triggers = yaml.safe_load(existing['metadata'].get('annotations', {}).get(TRIGGER_ANNOTATION, '[]'))
+
+ if not isinstance(triggers, list):
+ return definition
+
+ for trigger in triggers:
+ if trigger.get('fieldPath'):
+ parsed = self.parse_trigger_fieldpath(trigger['fieldPath'])
+ path = parsed.get('path', '').split('.')
+ if path:
+ existing_containers = get_from_fields(existing, path)
+ new_containers = get_from_fields(definition, path)
+ if parsed.get('name'):
+ existing_index = self.get_index({'name': parsed['name']}, existing_containers, ['name'])
+ new_index = self.get_index({'name': parsed['name']}, new_containers, ['name'])
+ elif parsed.get('index') is not None:
+ existing_index = new_index = int(parsed['index'])
+ else:
+ existing_index = new_index = None
+ if existing_index is not None and new_index is not None:
+ if existing_index < len(existing_containers) and new_index < len(new_containers):
+ set_from_fields(definition, path + [new_index, 'image'], get_from_fields(existing, path + [existing_index, 'image']))
+ return definition
+
+ def resolve_imagestream_triggers(self, existing, definition):
+
+ existing_triggers = existing.get('spec', {}).get('triggers')
+ new_triggers = definition['spec']['triggers']
+ existing_containers = existing.get('spec', {}).get('template', {}).get('spec', {}).get('containers', [])
+ new_containers = definition.get('spec', {}).get('template', {}).get('spec', {}).get('containers', [])
+ for i, trigger in enumerate(new_triggers):
+ if trigger.get('type') == 'ImageChange' and trigger.get('imageChangeParams'):
+ names = trigger['imageChangeParams'].get('containerNames', [])
+ for name in names:
+ old_container_index = self.get_index({'name': name}, existing_containers, ['name'])
+ new_container_index = self.get_index({'name': name}, new_containers, ['name'])
+ if old_container_index is not None and new_container_index is not None:
+ image = existing['spec']['template']['spec']['containers'][old_container_index]['image']
+ definition['spec']['template']['spec']['containers'][new_container_index]['image'] = image
+
+ existing_index = self.get_index(trigger['imageChangeParams'],
+ [x.get('imageChangeParams') for x in existing_triggers],
+ ['containerNames'])
+ if existing_index is not None:
+ existing_image = existing_triggers[existing_index].get('imageChangeParams', {}).get('lastTriggeredImage')
+ if existing_image:
+ definition['spec']['triggers'][i]['imageChangeParams']['lastTriggeredImage'] = existing_image
+ existing_from = existing_triggers[existing_index].get('imageChangeParams', {}).get('from', {})
+ new_from = trigger['imageChangeParams'].get('from', {})
+ existing_namespace = existing_from.get('namespace')
+ existing_name = existing_from.get('name', False)
+ new_name = new_from.get('name', True)
+ add_namespace = existing_namespace and 'namespace' not in new_from.keys() and existing_name == new_name
+ if add_namespace:
+ definition['spec']['triggers'][i]['imageChangeParams']['from']['namespace'] = existing_from['namespace']
+
+ return definition
+
+ def parse_trigger_fieldpath(self, expression):
+ parsed = TRIGGER_CONTAINER.search(expression).groupdict()
+ if parsed.get('index'):
+ parsed['index'] = int(parsed['index'])
+ return parsed
+
+ def create_project_request(self, definition):
+ definition['kind'] = 'ProjectRequest'
+ result = {'changed': False, 'result': {}}
+ resource = self.svc.find_resource(kind='ProjectRequest', api_version=definition['apiVersion'], fail=True)
+ if not self.check_mode:
+ try:
+ k8s_obj = resource.create(definition)
+ result['result'] = k8s_obj.to_dict()
+ except DynamicApiError as exc:
+ self.fail_json(msg="Failed to create object: {0}".format(exc.body),
+ error=exc.status, status=exc.status, reason=exc.reason)
+ result['changed'] = True
+ result['method'] = 'create'
+ return result
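
A worked example of the TRIGGER_CONTAINER regular expression that parse_trigger_fieldpath() above relies on. The fieldPath strings are hypothetical, but follow the JSONPath-like form used by image.openshift.io/triggers annotations:

    import re

    TRIGGER_CONTAINER = re.compile(
        r"(?P<path>.*)\[((?P<index>[0-9]+)|\?\(@\.name==[\"'\\]*(?P<name>[a-z0-9]([-a-z0-9]*[a-z0-9])?))"
    )

    # Container referenced by numeric index:
    m = TRIGGER_CONTAINER.search("spec.template.spec.containers[1].image").groupdict()
    # {'path': 'spec.template.spec.containers', 'index': '1', 'name': None}

    # Container referenced by name:
    m = TRIGGER_CONTAINER.search('spec.template.spec.containers[?(@.name=="web")].image').groupdict()
    # {'path': 'spec.template.spec.containers', 'index': None, 'name': 'web'}
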
diff --git a/ansible_collections/community/okd/plugins/module_utils/openshift_adm_prune_auth.py b/ansible_collections/community/okd/plugins/module_utils/openshift_adm_prune_auth.py
new file mode 100644
index 000000000..e5143ae4e
--- /dev/null
+++ b/ansible_collections/community/okd/plugins/module_utils/openshift_adm_prune_auth.py
@@ -0,0 +1,309 @@
+#!/usr/bin/env python
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.okd.plugins.module_utils.openshift_common import AnsibleOpenshiftModule
+
+try:
+ from kubernetes import client
+ from kubernetes.dynamic.exceptions import DynamicApiError, NotFoundError
+except ImportError:
+ pass
+
+
+class OpenShiftAdmPruneAuth(AnsibleOpenshiftModule):
+ def __init__(self, **kwargs):
+ super(OpenShiftAdmPruneAuth, self).__init__(**kwargs)
+
+ def prune_resource_binding(self, kind, api_version, ref_kind, ref_namespace_names, propagation_policy=None):
+
+ resource = self.find_resource(kind=kind, api_version=api_version, fail=True)
+ candidates = []
+ for ref_namespace, ref_name in ref_namespace_names:
+ try:
+ result = resource.get(name=None, namespace=ref_namespace)
+ result = result.to_dict()
+ result = result.get('items') if 'items' in result else [result]
+ for obj in result:
+ namespace = obj['metadata'].get('namespace', None)
+ name = obj['metadata'].get('name')
+ if ref_kind and obj['roleRef']['kind'] != ref_kind:
+ # skip this binding as the roleRef.kind does not match
+ continue
+ if obj['roleRef']['name'] == ref_name:
+ # select this binding as the roleRef.name match
+ candidates.append((namespace, name))
+ except NotFoundError:
+ continue
+ except DynamicApiError as exc:
+ msg = "Failed to get {kind} resource due to: {msg}".format(kind=kind, msg=exc.body)
+ self.fail_json(msg=msg)
+ except Exception as e:
+ msg = "Failed to get {kind} due to: {msg}".format(kind=kind, msg=to_native(e))
+ self.fail_json(msg=msg)
+
+ if len(candidates) == 0 or self.check_mode:
+ return [y if x is None else x + "/" + y for x, y in candidates]
+
+ delete_options = client.V1DeleteOptions()
+ if propagation_policy:
+ delete_options.propagation_policy = propagation_policy
+
+ for namespace, name in candidates:
+ try:
+ result = resource.delete(name=name, namespace=namespace, body=delete_options)
+ except DynamicApiError as exc:
+ msg = "Failed to delete {kind} {namespace}/{name} due to: {msg}".format(kind=kind, namespace=namespace, name=name, msg=exc.body)
+ self.fail_json(msg=msg)
+ except Exception as e:
+ msg = "Failed to delete {kind} {namespace}/{name} due to: {msg}".format(kind=kind, namespace=namespace, name=name, msg=to_native(e))
+ self.fail_json(msg=msg)
+ return [y if x is None else x + "/" + y for x, y in candidates]
+
+ def update_resource_binding(self, ref_kind, ref_names, namespaced=False):
+
+ kind = 'ClusterRoleBinding'
+ api_version = "rbac.authorization.k8s.io/v1"
+ if namespaced:
+ kind = "RoleBinding"
+ resource = self.find_resource(kind=kind, api_version=api_version, fail=True)
+ result = resource.get(name=None, namespace=None).to_dict()
+ result = result.get('items') if 'items' in result else [result]
+
+ if len(result) == 0:
+ return [], False
+
+ def _update_user_group(binding_namespace, subjects):
+ users, groups = [], []
+ for x in subjects:
+ if x['kind'] == 'User':
+ users.append(x['name'])
+ elif x['kind'] == 'Group':
+ groups.append(x['name'])
+ elif x['kind'] == 'ServiceAccount':
+ namespace = binding_namespace
+ if x.get('namespace') is not None:
+ namespace = x.get('namespace')
+ if namespace is not None:
+ users.append("system:serviceaccount:%s:%s" % (namespace, x['name']))
+ return users, groups
+
+ candidates = []
+ changed = False
+ for item in result:
+ subjects = item.get('subjects', [])
+ # keep only subjects that are not among the pruned ref_kind/ref_names
+ retainedSubjects = [x for x in subjects if not (x['kind'] == ref_kind and x['name'] in ref_names)]
+ if len(subjects) != len(retainedSubjects):
+ updated_binding = item
+ updated_binding['subjects'] = retainedSubjects
+ binding_namespace = item['metadata'].get('namespace', None)
+ updated_binding['userNames'], updated_binding['groupNames'] = _update_user_group(binding_namespace, retainedSubjects)
+ candidates.append(binding_namespace + "/" + item['metadata']['name'] if binding_namespace else item['metadata']['name'])
+ changed = True
+ if not self.check_mode:
+ try:
+ resource.apply(updated_binding, namespace=binding_namespace)
+ except DynamicApiError as exc:
+ msg = "Failed to apply object due to: {0}".format(exc.body)
+ self.fail_json(msg=msg)
+ return candidates, changed
+
+ def update_security_context(self, ref_names, key):
+ params = {'kind': 'SecurityContextConstraints', 'api_version': 'security.openshift.io/v1'}
+ sccs = self.kubernetes_facts(**params)
+ if not sccs['api_found']:
+ self.fail_json(msg=sccs['msg'])
+ sccs = sccs.get('resources')
+
+ candidates = []
+ changed = False
+ resource = self.find_resource(kind="SecurityContextConstraints", api_version="security.openshift.io/v1")
+ for item in sccs:
+ subjects = item.get(key, [])
+ retainedSubjects = [x for x in subjects if x not in ref_names]
+ if len(subjects) != len(retainedSubjects):
+ candidates.append(item['metadata']['name'])
+ changed = True
+ if not self.check_mode:
+ upd_sec_ctx = item
+ upd_sec_ctx.update({key: retainedSubjects})
+ try:
+ resource.apply(upd_sec_ctx, namespace=None)
+ except DynamicApiError as exc:
+ msg = "Failed to apply object due to: {0}".format(exc.body)
+ self.fail_json(msg=msg)
+ return candidates, changed
+
+ def auth_prune_roles(self):
+ params = {'kind': 'Role', 'api_version': 'rbac.authorization.k8s.io/v1', 'namespace': self.params.get('namespace')}
+ for attr in ('name', 'label_selectors'):
+ if self.params.get(attr):
+ params[attr] = self.params.get(attr)
+
+ result = self.kubernetes_facts(**params)
+ if not result['api_found']:
+ self.fail_json(msg=result['msg'])
+
+ roles = result.get('resources')
+ if len(roles) == 0:
+ self.exit_json(changed=False, msg="No candidate rolebinding to prune from namespace %s." % self.params.get('namespace'))
+
+ ref_roles = [(x['metadata']['namespace'], x['metadata']['name']) for x in roles]
+ candidates = self.prune_resource_binding(kind="RoleBinding",
+ api_version="rbac.authorization.k8s.io/v1",
+ ref_kind="Role",
+ ref_namespace_names=ref_roles,
+ propagation_policy='Foreground')
+ if len(candidates) == 0:
+ self.exit_json(changed=False, role_binding=candidates)
+
+ self.exit_json(changed=True, role_binding=candidates)
+
+ def auth_prune_clusterroles(self):
+ params = {'kind': 'ClusterRole', 'api_version': 'rbac.authorization.k8s.io/v1'}
+ for attr in ('name', 'label_selectors'):
+ if self.params.get(attr):
+ params[attr] = self.params.get(attr)
+
+ result = self.kubernetes_facts(**params)
+ if not result['api_found']:
+ self.fail_json(msg=result['msg'])
+
+ clusterroles = result.get('resources')
+ if len(clusterroles) == 0:
+ self.exit_json(changed=False, msg="No clusterroles found matching input criteria.")
+
+ ref_clusterroles = [(None, x['metadata']['name']) for x in clusterroles]
+
+ # Prune ClusterRoleBinding
+ candidates_cluster_binding = self.prune_resource_binding(kind="ClusterRoleBinding",
+ api_version="rbac.authorization.k8s.io/v1",
+ ref_kind=None,
+ ref_namespace_names=ref_clusterroles)
+
+ # Prune Role Binding
+ candidates_namespaced_binding = self.prune_resource_binding(kind="RoleBinding",
+ api_version="rbac.authorization.k8s.io/v1",
+ ref_kind='ClusterRole',
+ ref_namespace_names=ref_clusterroles)
+
+ self.exit_json(changed=True,
+ cluster_role_binding=candidates_cluster_binding,
+ role_binding=candidates_namespaced_binding)
+
+ def list_groups(self, params=None):
+ options = {'kind': 'Group', 'api_version': 'user.openshift.io/v1'}
+ if params:
+ for attr in ('name', 'label_selectors'):
+ if params.get(attr):
+ options[attr] = params.get(attr)
+ return self.kubernetes_facts(**options).get('resources')
+
+ def auth_prune_users(self):
+ params = {'kind': 'User', 'api_version': 'user.openshift.io/v1'}
+ for attr in ('name', 'label_selectors'):
+ if self.params.get(attr):
+ params[attr] = self.params.get(attr)
+
+ users = self.kubernetes_facts(**params).get('resources')
+ if len(users) == 0:
+ self.exit_json(changed=False, msg="No resource type 'User' found matching input criteria.")
+
+ names = [x['metadata']['name'] for x in users]
+ changed = False
+ # Remove the user role binding
+ rolebinding, changed_role = self.update_resource_binding(ref_kind="User",
+ ref_names=names,
+ namespaced=True)
+ changed = changed or changed_role
+ # Remove the user cluster role binding
+ clusterrolesbinding, changed_cr = self.update_resource_binding(ref_kind="User",
+ ref_names=names)
+ changed = changed or changed_cr
+
+ # Remove the user from security context constraints
+ sccs, changed_sccs = self.update_security_context(names, 'users')
+ changed = changed or changed_sccs
+
+ # Remove the user from groups
+ groups = self.list_groups()
+ deleted_groups = []
+ resource = self.find_resource(kind="Group", api_version="user.openshift.io/v1")
+ for grp in groups:
+ subjects = grp.get('users', [])
+ retainedSubjects = [x for x in subjects if x not in names]
+ if len(subjects) != len(retainedSubjects):
+ deleted_groups.append(grp['metadata']['name'])
+ changed = True
+ if not self.check_mode:
+ upd_group = grp
+ upd_group.update({'users': retainedSubjects})
+ try:
+ resource.apply(upd_group, namespace=None)
+ except DynamicApiError as exc:
+ msg = "Failed to apply object due to: {0}".format(exc.body)
+ self.fail_json(msg=msg)
+
+ # Remove the user's OAuthClientAuthorizations
+ oauth = self.kubernetes_facts(kind='OAuthClientAuthorization', api_version='oauth.openshift.io/v1').get('resources')
+ deleted_auths = []
+ resource = self.find_resource(kind="OAuthClientAuthorization", api_version="oauth.openshift.io/v1")
+ for authorization in oauth:
+ if authorization.get('userName', None) in names:
+ auth_name = authorization['metadata']['name']
+ deleted_auths.append(auth_name)
+ changed = True
+ if not self.check_mode:
+ try:
+ resource.delete(name=auth_name, namespace=None, body=client.V1DeleteOptions())
+ except DynamicApiError as exc:
+ msg = "Failed to delete OAuthClientAuthorization {name} due to: {msg}".format(name=auth_name, msg=exc.body)
+ self.fail_json(msg=msg)
+ except Exception as e:
+ msg = "Failed to delete OAuthClientAuthorization {name} due to: {msg}".format(name=auth_name, msg=to_native(e))
+ self.fail_json(msg=msg)
+
+ self.exit_json(changed=changed,
+ cluster_role_binding=clusterrolesbinding,
+ role_binding=rolebinding,
+ security_context_constraints=sccs,
+ authorization=deleted_auths,
+ group=deleted_groups)
+
+ def auth_prune_groups(self):
+ groups = self.list_groups(params=self.params)
+ if len(groups) == 0:
+ self.exit_json(changed=False, result="No resource type 'Group' found matching input criteria.")
+
+ names = [x['metadata']['name'] for x in groups]
+
+ changed = False
+ # Remove the groups role binding
+ rolebinding, changed_role = self.update_resource_binding(ref_kind="Group",
+ ref_names=names,
+ namespaced=True)
+ changed = changed or changed_role
+ # Remove the groups cluster role binding
+ clusterrolesbinding, changed_cr = self.update_resource_binding(ref_kind="Group",
+ ref_names=names)
+ changed = changed or changed_cr
+ # Remove the groups security context constraints
+ sccs, changed_sccs = self.update_security_context(names, 'groups')
+ changed = changed or changed_sccs
+
+ self.exit_json(changed=changed,
+ cluster_role_binding=clusterrolesbinding,
+ role_binding=rolebinding,
+ security_context_constraints=sccs)
+
+ def execute_module(self):
+ auth_prune = {
+ 'roles': self.auth_prune_roles,
+ 'clusterroles': self.auth_prune_clusterroles,
+ 'users': self.auth_prune_users,
+ 'groups': self.auth_prune_groups,
+ }
+ auth_prune[self.params.get('resource')]()
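
A condensed, standalone sketch of the binding update performed above when pruning a user (the data is hypothetical): subjects matching the pruned name are dropped, and the legacy userNames/groupNames lists are recomputed from what remains, as in _update_user_group():

    # Hypothetical RoleBinding subjects; pruning user 'alice' keeps the others.
    subjects = [
        {"kind": "User", "name": "alice"},
        {"kind": "Group", "name": "devs"},
        {"kind": "ServiceAccount", "name": "builder", "namespace": "testing"},
    ]
    pruned = {"alice"}
    retained = [s for s in subjects if not (s["kind"] == "User" and s["name"] in pruned)]

    users = [s["name"] for s in retained if s["kind"] == "User"]
    users += ["system:serviceaccount:%s:%s" % (s["namespace"], s["name"])
              for s in retained if s["kind"] == "ServiceAccount" and s.get("namespace")]
    groups = [s["name"] for s in retained if s["kind"] == "Group"]
    # users == ['system:serviceaccount:testing:builder'], groups == ['devs']
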
diff --git a/ansible_collections/community/okd/plugins/module_utils/openshift_adm_prune_deployments.py b/ansible_collections/community/okd/plugins/module_utils/openshift_adm_prune_deployments.py
new file mode 100644
index 000000000..418922d52
--- /dev/null
+++ b/ansible_collections/community/okd/plugins/module_utils/openshift_adm_prune_deployments.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from datetime import datetime, timezone
+import traceback
+
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.okd.plugins.module_utils.openshift_common import AnsibleOpenshiftModule
+
+try:
+ from kubernetes import client
+ from kubernetes.dynamic.exceptions import DynamicApiError
+except ImportError as e:
+ pass
+
+
+def get_deploymentconfig_for_replicationcontroller(replica_controller):
+ # DeploymentConfigAnnotation is an annotation name used to correlate a deployment with the
+ # DeploymentConfig on which the deployment is based.
+ # This is set on replication controller pod template by deployer controller.
+ DeploymentConfigAnnotation = "openshift.io/deployment-config.name"
+ try:
+ deploymentconfig_name = replica_controller['metadata']['annotations'].get(DeploymentConfigAnnotation)
+ if deploymentconfig_name is None or deploymentconfig_name == "":
+ return None
+ return deploymentconfig_name
+ except Exception:
+ return None
+
+
+class OpenShiftAdmPruneDeployment(AnsibleOpenshiftModule):
+
+ def __init__(self, **kwargs):
+ super(OpenShiftAdmPruneDeployment, self).__init__(**kwargs)
+
+ def filter_replication_controller(self, replicacontrollers):
+ def _deployment(obj):
+ return get_deploymentconfig_for_replicationcontroller(obj) is not None
+
+ def _zeroReplicaSize(obj):
+ return obj['spec']['replicas'] == 0 and obj['status']['replicas'] == 0
+
+ def _complete_failed(obj):
+ DeploymentStatusAnnotation = "openshift.io/deployment.phase"
+ try:
+ # validate that replication controller status is either 'Complete' or 'Failed'
+ deployment_phase = obj['metadata']['annotations'].get(DeploymentStatusAnnotation)
+ return deployment_phase in ('Failed', 'Complete')
+ except Exception:
+ return False
+
+ def _younger(obj):
+ creation_timestamp = datetime.strptime(obj['metadata']['creationTimestamp'], '%Y-%m-%dT%H:%M:%SZ')
+ now = datetime.now(timezone.utc).replace(tzinfo=None)
+ # age in minutes; total_seconds() avoids the day wrap-around of timedelta.seconds
+ age = (now - creation_timestamp).total_seconds() / 60
+ return age > self.params['keep_younger_than']
+
+ def _orphan(obj):
+ try:
+ # verify if the deploymentconfig associated to the replication controller is still existing
+ deploymentconfig_name = get_deploymentconfig_for_replicationcontroller(obj)
+ params = dict(
+ kind="DeploymentConfig",
+ api_version="apps.openshift.io/v1",
+ name=deploymentconfig_name,
+ namespace=obj["metadata"]["namespace"],
+ )
+ exists = self.kubernetes_facts(**params)
+ return not (exists["api_found"] and len(exists["resources"]) > 0)
+ except Exception:
+ return False
+
+ predicates = [_deployment, _zeroReplicaSize, _complete_failed]
+ if self.params['orphans']:
+ predicates.append(_orphan)
+ if self.params['keep_younger_than']:
+ predicates.append(_younger)
+
+ results = replicacontrollers.copy()
+ for pred in predicates:
+ results = filter(pred, results)
+ return list(results)
+
+ def execute_module(self):
+ # list replicationcontroller candidate for pruning
+ kind = 'ReplicationController'
+ api_version = 'v1'
+ resource = self.find_resource(kind=kind, api_version=api_version, fail=True)
+
+ # Get ReplicationController
+ params = dict(
+ kind=kind,
+ api_version="v1",
+ namespace=self.params.get("namespace"),
+ )
+ candidates = self.kubernetes_facts(**params)
+ candidates = self.filter_replication_controller(candidates["resources"])
+
+ if len(candidates) == 0:
+ self.exit_json(changed=False, replication_controllers=[])
+
+ changed = True
+ delete_options = client.V1DeleteOptions(propagation_policy='Background')
+ replication_controllers = []
+ for replica in candidates:
+ try:
+ result = replica
+ if not self.check_mode:
+ name = replica["metadata"]["name"]
+ namespace = replica["metadata"]["namespace"]
+ result = resource.delete(name=name, namespace=namespace, body=delete_options).to_dict()
+ replication_controllers.append(result)
+ except DynamicApiError as exc:
+ msg = "Failed to delete ReplicationController {namespace}/{name} due to: {msg}".format(namespace=namespace, name=name, msg=exc.body)
+ self.fail_json(msg=msg)
+ except Exception as e:
+ msg = "Failed to delete ReplicationController {namespace}/{name} due to: {msg}".format(namespace=namespace, name=name, msg=to_native(e))
+ self.fail_json(msg=msg)
+ self.exit_json(changed=changed, replication_controllers=replication_controllers)
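
The candidate selection above composes predicates by repeatedly wrapping the list in filter(); a self-contained sketch of the same pattern with hypothetical replication-controller stubs:

    # Hypothetical stubs standing in for ReplicationController resources.
    candidates = [
        {"name": "app-1", "replicas": 0, "phase": "Complete"},
        {"name": "app-2", "replicas": 2, "phase": "Running"},
        {"name": "app-3", "replicas": 0, "phase": "Failed"},
    ]

    def _zero_replicas(obj):
        return obj["replicas"] == 0

    def _complete_failed(obj):
        return obj["phase"] in ("Failed", "Complete")

    results = candidates
    for pred in (_zero_replicas, _complete_failed):
        results = filter(pred, results)  # lazily chains the predicates
    print([obj["name"] for obj in results])  # ['app-1', 'app-3']
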
diff --git a/ansible_collections/community/okd/plugins/module_utils/openshift_adm_prune_images.py b/ansible_collections/community/okd/plugins/module_utils/openshift_adm_prune_images.py
new file mode 100644
index 000000000..442cf9010
--- /dev/null
+++ b/ansible_collections/community/okd/plugins/module_utils/openshift_adm_prune_images.py
@@ -0,0 +1,477 @@
+#!/usr/bin/env python
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from datetime import datetime, timezone, timedelta
+import traceback
+import copy
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.module_utils.six import iteritems
+
+from ansible_collections.community.okd.plugins.module_utils.openshift_common import AnsibleOpenshiftModule
+
+from ansible_collections.community.okd.plugins.module_utils.openshift_images_common import (
+ OpenShiftAnalyzeImageStream,
+ get_image_blobs,
+ is_too_young_object,
+ is_created_after,
+)
+from ansible_collections.community.okd.plugins.module_utils.openshift_docker_image import (
+ parse_docker_image_ref,
+ convert_storage_to_bytes,
+)
+
+try:
+ from kubernetes import client
+ from kubernetes.client import rest
+ from kubernetes.dynamic.exceptions import (
+ DynamicApiError,
+ NotFoundError,
+ ApiException
+ )
+except ImportError:
+ pass
+
+
+ApiConfiguration = {
+ "LimitRange": "v1",
+ "Pod": "v1",
+ "ReplicationController": "v1",
+ "DaemonSet": "apps/v1",
+ "Deployment": "apps/v1",
+ "ReplicaSet": "apps/v1",
+ "StatefulSet": "apps/v1",
+ "Job": "batch/v1",
+ "CronJob": "batch/v1beta1",
+ "DeploymentConfig": "apps.openshift.io/v1",
+ "BuildConfig": "build.openshift.io/v1",
+ "Build": "build.openshift.io/v1",
+ "Image": "image.openshift.io/v1",
+ "ImageStream": "image.openshift.io/v1",
+}
+
+
+def read_object_annotation(obj, name):
+ return obj["metadata"].get("annotations", {}).get(name)
+
+
+def determine_host_registry(module, images, image_streams):
+ # filter managed images
+ def _f_managed_images(obj):
+ value = read_object_annotation(obj, "openshift.io/image.managed")
+ return boolean(value) if value is not None else False
+
+ managed_images = list(filter(_f_managed_images, images))
+
+ # Be sure to pick the newest managed image, which should have up-to-date information
+ sorted_images = sorted(managed_images,
+ key=lambda x: x["metadata"]["creationTimestamp"],
+ reverse=True)
+ docker_image_ref = ""
+ if len(sorted_images) > 0:
+ docker_image_ref = sorted_images[0].get("dockerImageReference", "")
+ else:
+ # Second, try to get the pull spec from any image stream.
+ # Sorting by creation timestamp may not give us up-to-date info; modification
+ # time would be a much better criterion, but it is not available here.
+ sorted_image_streams = sorted(image_streams,
+ key=lambda x: x["metadata"]["creationTimestamp"],
+ reverse=True)
+ for i_stream in sorted_image_streams:
+ docker_image_ref = i_stream["status"].get("dockerImageRepository", "")
+ if len(docker_image_ref) > 0:
+ break
+
+ if len(docker_image_ref) == 0:
+ module.exit_json(changed=False, result="no managed image found")
+
+    result, error = parse_docker_image_ref(docker_image_ref, module)
+    if error:
+        module.fail_json(msg=error)
+    return result['hostname']
+
+
+class OpenShiftAdmPruneImages(AnsibleOpenshiftModule):
+ def __init__(self, **kwargs):
+ super(OpenShiftAdmPruneImages, self).__init__(**kwargs)
+
+ self.max_creation_timestamp = self.get_max_creation_timestamp()
+ self._rest_client = None
+ self.registryhost = self.params.get('registry_url')
+ self.changed = False
+
+ def list_objects(self):
+ result = {}
+ for kind, version in iteritems(ApiConfiguration):
+ namespace = None
+ if self.params.get("namespace") and kind.lower() == "imagestream":
+ namespace = self.params.get("namespace")
+ try:
+ result[kind] = self.kubernetes_facts(kind=kind,
+ api_version=version,
+ namespace=namespace).get('resources')
+ except DynamicApiError as e:
+ self.fail_json(
+ msg="An error occurred while trying to list objects.",
+ reason=e.reason,
+ status=e.status,
+ )
+ except Exception as e:
+ self.fail_json(
+ msg="An error occurred while trying to list objects.",
+ error=to_native(e)
+ )
+ return result
+
+ def get_max_creation_timestamp(self):
+ result = None
+ if self.params.get("keep_younger_than"):
+ dt_now = datetime.now(timezone.utc).replace(tzinfo=None)
+ result = dt_now - timedelta(minutes=self.params.get("keep_younger_than"))
+ return result
+
+ @property
+ def rest_client(self):
+ if not self._rest_client:
+ configuration = copy.deepcopy(self.client.configuration)
+ validate_certs = self.params.get('registry_validate_certs')
+ ssl_ca_cert = self.params.get('registry_ca_cert')
+ if validate_certs is not None:
+ configuration.verify_ssl = validate_certs
+ if ssl_ca_cert is not None:
+ configuration.ssl_ca_cert = ssl_ca_cert
+ self._rest_client = rest.RESTClientObject(configuration)
+
+ return self._rest_client
+
+ def delete_from_registry(self, url):
+ try:
+ response = self.rest_client.DELETE(url=url, headers=self.client.configuration.api_key)
+ if response.status == 404:
+ # Unable to delete layer
+ return None
+ # non-2xx/3xx response doesn't cause an error
+ if response.status < 200 or response.status >= 400:
+ return None
+ if response.status != 202 and response.status != 204:
+ self.fail_json(
+ msg="Delete URL {0}: Unexpected status code in response: {1}".format(
+ url, response.status),
+ reason=response.reason
+ )
+ return None
+ except ApiException as e:
+ if e.status != 404:
+ self.fail_json(
+ msg="Failed to delete URL: %s" % url,
+ reason=e.reason,
+ status=e.status,
+ )
+ except Exception as e:
+ self.fail_json(msg="Delete URL {0}: {1}".format(url, type(e)))
+
+ def delete_layers_links(self, path, layers):
+ for layer in layers:
+ url = "%s/v2/%s/blobs/%s" % (self.registryhost, path, layer)
+ self.changed = True
+ if not self.check_mode:
+ self.delete_from_registry(url=url)
+
+ def delete_manifests(self, path, digests):
+ for digest in digests:
+ url = "%s/v2/%s/manifests/%s" % (self.registryhost, path, digest)
+ self.changed = True
+ if not self.check_mode:
+ self.delete_from_registry(url=url)
+
+ def delete_blobs(self, blobs):
+ for blob in blobs:
+ self.changed = True
+ url = "%s/admin/blobs/%s" % (self.registryhost, blob)
+ if not self.check_mode:
+ self.delete_from_registry(url=url)
+
+ def update_image_stream_status(self, definition):
+ kind = definition["kind"]
+ api_version = definition["apiVersion"]
+ namespace = definition["metadata"]["namespace"]
+ name = definition["metadata"]["name"]
+
+ self.changed = True
+ result = definition
+ if not self.check_mode:
+ try:
+ result = self.request(
+ "PUT",
+ "/apis/{api_version}/namespaces/{namespace}/imagestreams/{name}/status".format(
+ api_version=api_version,
+ namespace=namespace,
+ name=name
+ ),
+ body=definition,
+ content_type="application/json",
+ ).to_dict()
+ except DynamicApiError as exc:
+ msg = "Failed to patch object: kind={0} {1}/{2}".format(
+ kind, namespace, name
+ )
+ self.fail_json(msg=msg, status=exc.status, reason=exc.reason)
+ except Exception as exc:
+ msg = "Failed to patch object kind={0} {1}/{2} due to: {3}".format(
+ kind, namespace, name, exc
+ )
+ self.fail_json(msg=msg, error=to_native(exc))
+ return result
+
+ def delete_image(self, image):
+ kind = "Image"
+ api_version = "image.openshift.io/v1"
+ resource = self.find_resource(kind=kind, api_version=api_version)
+ name = image["metadata"]["name"]
+ self.changed = True
+ if not self.check_mode:
+ try:
+ delete_options = client.V1DeleteOptions(grace_period_seconds=0)
+ return resource.delete(name=name, body=delete_options).to_dict()
+ except NotFoundError:
+ pass
+ except DynamicApiError as exc:
+ self.fail_json(
+ msg="Failed to delete object %s/%s due to: %s" % (
+ kind, name, exc.body
+ ),
+ reason=exc.reason,
+ status=exc.status
+ )
+ else:
+ existing = resource.get(name=name)
+ if existing:
+ existing = existing.to_dict()
+ return existing
+
+ def exceeds_limits(self, namespace, image):
+ if namespace not in self.limit_range:
+ return False
+ docker_image_metadata = image.get("dockerImageMetadata")
+ if not docker_image_metadata:
+ return False
+ docker_image_size = docker_image_metadata["Size"]
+
+ for limit in self.limit_range.get(namespace):
+ for item in limit["spec"]["limits"]:
+ if item["type"] != "openshift.io/Image":
+ continue
+ limit_max = item["max"]
+ if not limit_max:
+ continue
+ storage = limit_max["storage"]
+ if not storage:
+ continue
+ if convert_storage_to_bytes(storage) < docker_image_size:
+ # image size is larger than the permitted limit range max size
+ return True
+ return False
+
+ def prune_image_stream_tag(self, stream, tag_event_list):
+ manifests_to_delete, images_to_delete = [], []
+ filtered_items = []
+ tag_event_items = tag_event_list["items"] or []
+ prune_over_size_limit = self.params.get("prune_over_size_limit")
+ stream_namespace = stream["metadata"]["namespace"]
+ stream_name = stream["metadata"]["name"]
+ for idx, item in enumerate(tag_event_items):
+ if is_created_after(item["created"], self.max_creation_timestamp):
+ filtered_items.append(item)
+ continue
+
+ if idx == 0:
+ istag = "%s/%s:%s" % (stream_namespace,
+ stream_name,
+ tag_event_list["tag"])
+ if istag in self.used_tags:
+ # keeping because tag is used
+ filtered_items.append(item)
+ continue
+
+ if item["image"] not in self.image_mapping:
+ # There are a few reasons why the image may not be found:
+ # 1. the image is deleted manually and this record is no longer valid
+ # 2. the imagestream was observed before the image creation, i.e.
+ # this record was created recently and it should be protected by keep_younger_than
+ continue
+
+ image = self.image_mapping[item["image"]]
+ # check prune over limit size
+ if prune_over_size_limit and not self.exceeds_limits(stream_namespace, image):
+ filtered_items.append(item)
+ continue
+
+ image_ref = "%s/%s@%s" % (stream_namespace,
+ stream_name,
+ item["image"])
+ if image_ref in self.used_images:
+ # keeping because the image is used
+ filtered_items.append(item)
+ continue
+
+ images_to_delete.append(item["image"])
+ if self.params.get('prune_registry'):
+ manifests_to_delete.append(image["metadata"]["name"])
+ path = stream_namespace + "/" + stream_name
+ image_blobs, err = get_image_blobs(image)
+ if not err:
+ self.delete_layers_links(path, image_blobs)
+
+ return filtered_items, manifests_to_delete, images_to_delete
+
+ def prune_image_streams(self, stream):
+ name = stream['metadata']['namespace'] + "/" + stream['metadata']['name']
+ if is_too_young_object(stream, self.max_creation_timestamp):
+ # keeping all images because the image stream is too young
+ return None, []
+ facts = self.kubernetes_facts(kind="ImageStream",
+ api_version=ApiConfiguration.get("ImageStream"),
+ name=stream["metadata"]["name"],
+ namespace=stream["metadata"]["namespace"])
+ image_stream = facts.get('resources')
+ if len(image_stream) != 1:
+ # skipping because it does not exist anymore
+ return None, []
+ stream = image_stream[0]
+ namespace = self.params.get("namespace")
+ stream_to_update = not namespace or (stream["metadata"]["namespace"] == namespace)
+
+ manifests_to_delete, images_to_delete = [], []
+ deleted_items = False
+
+ # Update Image stream tag
+ if stream_to_update:
+ tags = stream["status"].get("tags", [])
+ for idx, tag_event_list in enumerate(tags):
+ (
+ filtered_tag_event,
+ tag_manifests_to_delete,
+ tag_images_to_delete
+ ) = self.prune_image_stream_tag(stream, tag_event_list)
+ stream['status']['tags'][idx]['items'] = filtered_tag_event
+ manifests_to_delete += tag_manifests_to_delete
+ images_to_delete += tag_images_to_delete
+ deleted_items = deleted_items or (len(tag_images_to_delete) > 0)
+
+ # Deleting tags without items
+ tags = []
+ for tag in stream["status"].get("tags", []):
+ if tag['items'] is None or len(tag['items']) == 0:
+ continue
+ tags.append(tag)
+
+ stream['status']['tags'] = tags
+ result = None
+ # Update ImageStream
+ if stream_to_update:
+ if deleted_items:
+ result = self.update_image_stream_status(stream)
+
+ if self.params.get("prune_registry"):
+ self.delete_manifests(name, manifests_to_delete)
+
+ return result, images_to_delete
+
+ def prune_images(self, image):
+ if not self.params.get("all_images"):
+ if read_object_annotation(image, "openshift.io/image.managed") != "true":
+ # keeping external image because all_images is set to false
+ # pruning only managed images
+ return None
+
+ if is_too_young_object(image, self.max_creation_timestamp):
+ # keeping because of keep_younger_than
+ return None
+
+ # Deleting image from registry
+ if self.params.get("prune_registry"):
+ image_blobs, err = get_image_blobs(image)
+ if err:
+ self.fail_json(msg=err)
+ # add blob for image name
+ image_blobs.append(image["metadata"]["name"])
+ self.delete_blobs(image_blobs)
+
+ # Delete image from cluster
+ return self.delete_image(image)
+
+ def execute_module(self):
+ resources = self.list_objects()
+ if not self.check_mode and self.params.get('prune_registry'):
+ if not self.registryhost:
+ self.registryhost = determine_host_registry(self.module, resources['Image'], resources['ImageStream'])
+ # validate that host has a scheme
+ if "://" not in self.registryhost:
+ self.registryhost = "https://" + self.registryhost
+ # Analyze Image Streams
+ analyze_ref = OpenShiftAnalyzeImageStream(
+ ignore_invalid_refs=self.params.get('ignore_invalid_refs'),
+ max_creation_timestamp=self.max_creation_timestamp,
+ module=self.module
+ )
+ self.used_tags, self.used_images, error = analyze_ref.analyze_image_stream(resources)
+ if error:
+ self.fail_json(msg=error)
+
+ # Create image mapping
+ self.image_mapping = {}
+ for m in resources["Image"]:
+ self.image_mapping[m["metadata"]["name"]] = m
+
+ # Create limit range mapping
+ self.limit_range = {}
+ for limit in resources["LimitRange"]:
+ namespace = limit["metadata"]["namespace"]
+ if namespace not in self.limit_range:
+ self.limit_range[namespace] = []
+ self.limit_range[namespace].append(limit)
+
+ # Stage 1: delete history from image streams
+ updated_image_streams = []
+ deleted_tags_images = []
+ updated_is_mapping = {}
+ for stream in resources['ImageStream']:
+ result, images_to_delete = self.prune_image_streams(stream)
+ if result:
+ updated_is_mapping[result["metadata"]["namespace"] + "/" + result["metadata"]["name"]] = result
+ updated_image_streams.append(result)
+ deleted_tags_images += images_to_delete
+
+ # Create a list with images referenced on image stream
+ self.referenced_images = []
+ for item in self.kubernetes_facts(kind="ImageStream", api_version="image.openshift.io/v1")["resources"]:
+ name = "%s/%s" % (item["metadata"]["namespace"], item["metadata"]["name"])
+ if name in updated_is_mapping:
+ item = updated_is_mapping[name]
+ for tag in item["status"].get("tags", []):
+ self.referenced_images += [t["image"] for t in tag["items"] or []]
+
+ # Stage 2: delete images
+ images = []
+ images_to_delete = [x["metadata"]["name"] for x in resources['Image']]
+ if self.params.get("namespace") is not None:
+ # When namespace is defined, prune only images that were referenced by ImageStream
+ # from the corresponding namespace
+ images_to_delete = deleted_tags_images
+ for name in images_to_delete:
+ if name in self.referenced_images:
+ # The image is referenced in one or more image streams
+ continue
+ if name not in self.image_mapping:
+ # The image no longer exists
+ continue
+ result = self.prune_images(self.image_mapping[name])
+ if result:
+ images.append(result)
+
+ result = {
+ "changed": self.changed,
+ "deleted_images": images,
+ "updated_image_streams": updated_image_streams,
+ }
+ self.exit_json(**result)
diff --git a/ansible_collections/community/okd/plugins/module_utils/openshift_builds.py b/ansible_collections/community/okd/plugins/module_utils/openshift_builds.py
new file mode 100644
index 000000000..02e60fd2a
--- /dev/null
+++ b/ansible_collections/community/okd/plugins/module_utils/openshift_builds.py
@@ -0,0 +1,409 @@
+#!/usr/bin/env python
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from datetime import datetime, timezone, timedelta
+import traceback
+import time
+
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.okd.plugins.module_utils.openshift_common import AnsibleOpenshiftModule
+
+try:
+ from kubernetes.dynamic.exceptions import DynamicApiError
+except ImportError as e:
+ pass
+
+
+class OpenShiftBuilds(AnsibleOpenshiftModule):
+ def __init__(self, **kwargs):
+ super(OpenShiftBuilds, self).__init__(**kwargs)
+
+ def get_build_config(self, name, namespace):
+ params = dict(
+ kind="BuildConfig",
+ api_version="build.openshift.io/v1",
+ name=name,
+ namespace=namespace,
+ )
+ result = self.kubernetes_facts(**params)
+ return result["resources"]
+
+ def clone_build(self, name, namespace, request):
+ try:
+ result = self.request(
+ method="POST",
+ path="/apis/build.openshift.io/v1/namespaces/{namespace}/builds/{name}/clone".format(
+ namespace=namespace,
+ name=name
+ ),
+ body=request,
+ content_type="application/json",
+ )
+ return result.to_dict()
+ except DynamicApiError as exc:
+ msg = "Failed to clone Build %s/%s due to: %s" % (namespace, name, exc.body)
+ self.fail_json(msg=msg, status=exc.status, reason=exc.reason)
+ except Exception as e:
+ msg = "Failed to clone Build %s/%s due to: %s" % (namespace, name, to_native(e))
+ self.fail_json(msg=msg, error=to_native(e), exception=e)
+
+ def instantiate_build_config(self, name, namespace, request):
+ try:
+ result = self.request(
+ method="POST",
+ path="/apis/build.openshift.io/v1/namespaces/{namespace}/buildconfigs/{name}/instantiate".format(
+ namespace=namespace,
+ name=name
+ ),
+ body=request,
+ content_type="application/json",
+ )
+ return result.to_dict()
+ except DynamicApiError as exc:
+ msg = "Failed to instantiate BuildConfig %s/%s due to: %s" % (namespace, name, exc.body)
+ self.fail_json(msg=msg, status=exc.status, reason=exc.reason)
+ except Exception as e:
+ msg = "Failed to instantiate BuildConfig %s/%s due to: %s" % (namespace, name, to_native(e))
+ self.fail_json(msg=msg, error=to_native(e), exception=e)
+
+ def start_build(self):
+
+ result = None
+ name = self.params.get("build_config_name")
+ if not name:
+ name = self.params.get("build_name")
+
+ build_request = {
+ "kind": "BuildRequest",
+ "apiVersion": "build.openshift.io/v1",
+ "metadata": {
+ "name": name
+ },
+ "triggeredBy": [
+ {"message": "Manually triggered"}
+ ],
+ }
+
+ # Override the incremental flag if requested
+ incremental = self.params.get("incremental")
+ if incremental is not None:
+ build_request.update(
+ {
+ "sourceStrategyOptions": {
+ "incremental": incremental
+ }
+ }
+ )
+
+ # Environment variable
+ if self.params.get("env_vars"):
+ build_request.update(
+ {
+ "env": self.params.get("env_vars")
+ }
+ )
+
+ # Docker strategy option
+ if self.params.get("build_args"):
+ build_request.update(
+ {
+ "dockerStrategyOptions": {
+ "buildArgs": self.params.get("build_args")
+ },
+ }
+ )
+
+ # caching option; merge into dockerStrategyOptions so a buildArgs
+ # entry set above is not overwritten
+ no_cache = self.params.get("no_cache")
+ if no_cache is not None:
+ build_request.setdefault("dockerStrategyOptions", {}).update(
+ {"noCache": no_cache}
+ )
+
+ # commit
+ if self.params.get("commit"):
+ build_request.update(
+ {
+ "revision": {
+ "git": {
+ "commit": self.params.get("commit")
+ }
+ }
+ }
+ )
+
+ if self.params.get("build_config_name"):
+ # Instantiate Build from Build config
+ result = self.instantiate_build_config(
+ name=self.params.get("build_config_name"),
+ namespace=self.params.get("namespace"),
+ request=build_request
+ )
+
+ else:
+ # Re-run Build
+ result = self.clone_build(
+ name=self.params.get("build_name"),
+ namespace=self.params.get("namespace"),
+ request=build_request
+ )
+
+ if result and self.params.get("wait"):
+ start = datetime.now()
+
+ def _total_wait_time():
+ return (datetime.now() - start).seconds
+
+ wait_timeout = self.params.get("wait_timeout")
+ wait_sleep = self.params.get("wait_sleep")
+ last_status_phase = None
+ while _total_wait_time() < wait_timeout:
+ params = dict(
+ kind=result["kind"],
+ api_version=result["apiVersion"],
+ name=result["metadata"]["name"],
+ namespace=result["metadata"]["namespace"],
+ )
+ facts = self.kubernetes_facts(**params)
+ if len(facts["resources"]) > 0:
+ last_status_phase = facts["resources"][0]["status"]["phase"]
+ if last_status_phase == "Complete":
+ result = facts["resources"][0]
+ break
+ elif last_status_phase in ("Cancelled", "Error", "Failed"):
+ self.fail_json(
+ msg="Unexpected status for Build %s/%s: %s" % (
+ result["metadata"]["name"],
+ result["metadata"]["namespace"],
+ last_status_phase
+ )
+ )
+ time.sleep(wait_sleep)
+
+ if last_status_phase != "Complete":
+ name = result["metadata"]["name"]
+ namespace = result["metadata"]["namespace"]
+ msg = "Build %s/%s has not complete after %d second(s)," \
+ "current status is %s" % (namespace, name, wait_timeout, last_status_phase)
+
+ self.fail_json(msg=msg)
+
+ result = [result] if result else []
+ self.exit_json(changed=True, builds=result)
+
+ def cancel_build(self, restart):
+
+ kind = 'Build'
+ api_version = 'build.openshift.io/v1'
+
+ namespace = self.params.get("namespace")
+ phases = ["new", "pending", "running"]
+ build_phases = self.params.get("build_phases", [])
+ if build_phases:
+ phases = [p.lower() for p in build_phases]
+
+ names = []
+ if self.params.get("build_name"):
+ names.append(self.params.get("build_name"))
+ else:
+ build_config = self.params.get("build_config_name")
+ # list all builds from namespace
+ params = dict(
+ kind=kind,
+ api_version=api_version,
+ namespace=namespace
+ )
+ resources = self.kubernetes_facts(**params).get("resources", [])
+
+ def _filter_builds(build):
+ config = build["metadata"].get("labels", {}).get("openshift.io/build-config.name")
+ # match every build when no build_config filter is given
+ return build_config is None or (config is not None and config in build_config)
+
+ for item in list(filter(_filter_builds, resources)):
+ name = item["metadata"]["name"]
+ if name not in names:
+ names.append(name)
+
+ if len(names) == 0:
+ self.exit_json(changed=False, msg="No Build found from namespace %s" % namespace)
+
+ warning = []
+ builds_to_cancel = []
+ for name in names:
+ params = dict(
+ kind=kind,
+ api_version=api_version,
+ name=name,
+ namespace=namespace
+ )
+
+ resource = self.kubernetes_facts(**params).get("resources", [])
+ if len(resource) == 0:
+ warning.append("Build %s/%s not found" % (namespace, name))
+ continue
+
+ resource = resource[0]
+ phase = resource["status"].get("phase").lower()
+
+ # Build status.phase matches one of the requested phases (i.e. not completed)
+ if phase in phases:
+ builds_to_cancel.append(resource)
+ else:
+ warning.append("build %s/%s is not in expected phase, found %s" % (namespace, name, phase))
+
+ changed = False
+ result = []
+ for build in builds_to_cancel:
+ # Set cancelled to true
+ build["status"]["cancelled"] = True
+ name = build["metadata"]["name"]
+ changed = True
+ try:
+ content_type = "application/json"
+ cancelled_build = self.request(
+ "PUT",
+ "/apis/build.openshift.io/v1/namespaces/{0}/builds/{1}".format(
+ namespace, name
+ ),
+ body=build,
+ content_type=content_type,
+ ).to_dict()
+ result.append(cancelled_build)
+ except DynamicApiError as exc:
+ self.fail_json(
+ msg="Failed to cancel Build %s/%s due to: %s" % (namespace, name, exc),
+ reason=exc.reason,
+ status=exc.status
+ )
+ except Exception as e:
+ self.fail_json(
+ msg="Failed to cancel Build %s/%s due to: %s" % (namespace, name, e)
+ )
+
+ # Make sure the build phase is really cancelled.
+ def _wait_until_cancelled(build, wait_timeout, wait_sleep):
+ start = datetime.now()
+ last_phase = None
+ name = build["metadata"]["name"]
+ while (datetime.now() - start).seconds < wait_timeout:
+ params = dict(
+ kind=kind,
+ api_version=api_version,
+ name=name,
+ namespace=namespace
+ )
+ resource = self.kubernetes_facts(**params).get("resources", [])
+ if len(resource) == 0:
+ return None, "Build %s/%s not found" % (namespace, name)
+ resource = resource[0]
+ last_phase = resource["status"]["phase"]
+ if last_phase == "Cancelled":
+ return resource, None
+ time.sleep(wait_sleep)
+ return None, "Build %s/%s is not cancelled as expected, current state is %s" % (namespace, name, last_phase)
+
+ if result and self.params.get("wait"):
+ wait_timeout = self.params.get("wait_timeout")
+ wait_sleep = self.params.get("wait_sleep")
+
+ wait_result = []
+ for build in result:
+ ret, err = _wait_until_cancelled(build, wait_timeout, wait_sleep)
+ if err:
+ self.fail_json(msg=err)
+ wait_result.append(ret)
+ result = wait_result
+
+ if restart:
+ self.start_build()
+
+ self.exit_json(builds=result, changed=changed)
+
+ def execute_module(self):
+ state = self.params.get("state")
+ if state == "started":
+ self.start_build()
+ else:
+ restart = state == "restarted"
+ self.cancel_build(restart=restart)
+
+
+class OpenShiftPruneBuilds(OpenShiftBuilds):
+ def __init__(self, **kwargs):
+ super(OpenShiftPruneBuilds, self).__init__(**kwargs)
+
+ def execute_module(self):
+ # list Build candidates for pruning
+ kind = 'Build'
+ api_version = 'build.openshift.io/v1'
+ resource = self.find_resource(kind=kind, api_version=api_version, fail=True)
+
+ self.max_creation_timestamp = None
+ keep_younger_than = self.params.get("keep_younger_than")
+ if keep_younger_than:
+ now = datetime.now(timezone.utc).replace(tzinfo=None)
+ self.max_creation_timestamp = now - timedelta(minutes=keep_younger_than)
+
+ def _prunable_build(build):
+ return build["status"]["phase"] in ("Complete", "Failed", "Error", "Cancelled")
+
+ def _orphan_build(build):
+ if not _prunable_build(build):
+ return False
+
+ config = build["status"].get("config", None)
+ if not config:
+ return True
+ build_config = self.get_build_config(config["name"], config["namespace"])
+ return len(build_config) == 0
+
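+ # Note: despite its name, this predicate selects builds created *before*
+ # the cutoff, i.e. old enough (per keep_younger_than) to be pruned.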
+ def _younger_build(build):
+ if not self.max_creation_timestamp:
+ return False
+ creation_timestamp = datetime.strptime(build['metadata']['creationTimestamp'], '%Y-%m-%dT%H:%M:%SZ')
+ return creation_timestamp < self.max_creation_timestamp
+
+ predicates = [
+ _prunable_build,
+ ]
+ if self.params.get("orphans"):
+ predicates.append(_orphan_build)
+ if self.max_creation_timestamp:
+ predicates.append(_younger_build)
+
+ # Get Builds
+ params = dict(
+ kind=kind,
+ api_version=api_version,
+ namespace=self.params.get("namespace"),
+ )
+ result = self.kubernetes_facts(**params)
+ candidates = result["resources"]
+ for pred in predicates:
+ candidates = list(filter(pred, candidates))
+
+ if self.check_mode:
+ changed = len(candidates) > 0
+ self.exit_json(changed=changed, builds=candidates)
+
+ changed = False
+ for build in candidates:
+ changed = True
+ try:
+ name = build["metadata"]["name"]
+ namespace = build["metadata"]["namespace"]
+ resource.delete(name=name, namespace=namespace, body={})
+ except DynamicApiError as exc:
+ msg = "Failed to delete Build %s/%s due to: %s" % (namespace, name, exc.body)
+ self.fail_json(msg=msg, status=exc.status, reason=exc.reason)
+ except Exception as e:
+ msg = "Failed to delete Build %s/%s due to: %s" % (namespace, name, to_native(e))
+ self.fail_json(msg=msg, error=to_native(e), exception=e)
+ self.exit_json(changed=changed, builds=candidates)
diff --git a/ansible_collections/community/okd/plugins/module_utils/openshift_common.py b/ansible_collections/community/okd/plugins/module_utils/openshift_common.py
new file mode 100644
index 000000000..a1318f9a5
--- /dev/null
+++ b/ansible_collections/community/okd/plugins/module_utils/openshift_common.py
@@ -0,0 +1,93 @@
+#!/usr/bin/env python
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import traceback
+from abc import abstractmethod
+
+from ansible.module_utils._text import to_native
+
+try:
+ from ansible_collections.kubernetes.core.plugins.module_utils.k8s.client import get_api_client
+ from ansible_collections.kubernetes.core.plugins.module_utils.k8s.core import AnsibleK8SModule
+ from ansible_collections.kubernetes.core.plugins.module_utils.k8s.service import (
+ K8sService,
+ diff_objects,
+ )
+ from ansible_collections.kubernetes.core.plugins.module_utils.k8s.runner import (
+ perform_action,
+ validate,
+ )
+ from ansible_collections.kubernetes.core.plugins.module_utils.k8s.resource import (
+ create_definitions,
+ merge_params,
+ flatten_list_kind,
+ )
+ from ansible_collections.kubernetes.core.plugins.module_utils.k8s.exceptions import CoreException
+ HAS_KUBERNETES_COLLECTION = True
+ k8s_collection_import_exception = None
+ K8S_COLLECTION_ERROR = None
+except ImportError as e:
+ HAS_KUBERNETES_COLLECTION = False
+ k8s_collection_import_exception = e
+ K8S_COLLECTION_ERROR = traceback.format_exc()
+
+
+class AnsibleOpenshiftModule(AnsibleK8SModule):
+
+ def __init__(self, **kwargs):
+ super(AnsibleOpenshiftModule, self).__init__(**kwargs)
+
+ # Fail early, before any of the helpers imported from
+ # kubernetes.core are referenced below.
+ if not HAS_KUBERNETES_COLLECTION:
+ self.fail_json(
+ msg="The kubernetes.core collection must be installed",
+ exception=K8S_COLLECTION_ERROR,
+ error=to_native(k8s_collection_import_exception),
+ )
+
+ self.client = get_api_client(module=self)
+ self.fail = self.fail_json
+
+ self.svc = K8sService(self.client, self._module)
+ self.find_resource = self.svc.find_resource
+ self.kubernetes_facts = self.svc.find
+
+ @property
+ def module(self):
+ return self._module
+
+ @abstractmethod
+ def execute_module(self):
+ pass
+
+ def request(self, *args, **kwargs):
+ return self.client.client.request(*args, **kwargs)
+
+ def set_resource_definitions(self):
+ self.resource_definitions = create_definitions(self.params)
+
+ def perform_action(self, definition, params):
+ return perform_action(self.svc, definition, params)
+
+ def validate(self, definition):
+ validate(self.client, self, definition)
+
+ @staticmethod
+ def merge_params(definition, params):
+ return merge_params(definition, params)
+
+ @staticmethod
+ def flatten_list_kind(definition, params):
+ return flatten_list_kind(definition, params)
+
+ @staticmethod
+ def diff_objects(existing, new):
+ return diff_objects(existing, new)
+
+ def run_module(self):
+
+ try:
+ self.execute_module()
+ except CoreException as e:
+ self.fail_from_exception(e)
diff --git a/ansible_collections/community/okd/plugins/module_utils/openshift_docker_image.py b/ansible_collections/community/okd/plugins/module_utils/openshift_docker_image.py
new file mode 100644
index 000000000..27dbe6cc7
--- /dev/null
+++ b/ansible_collections/community/okd/plugins/module_utils/openshift_docker_image.py
@@ -0,0 +1,103 @@
+#!/usr/bin/env python
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+
+def convert_storage_to_bytes(value):
+ keys = {
+ "Ki": 1024,
+ "Mi": 1024 * 1024,
+ "Gi": 1024 * 1024 * 1024,
+ "Ti": 1024 * 1024 * 1024 * 1024,
+ "Pi": 1024 * 1024 * 1024 * 1024 * 1024,
+ "Ei": 1024 * 1024 * 1024 * 1024 * 1024 * 1024,
+ }
+ for k in keys:
+ if value.endswith(k) or value.endswith(k[0]):
+ idx = value.find(k[0])
+ return keys.get(k) * int(value[:idx])
+ return int(value)
+
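+# Illustrative usage (values assumed, not from the module itself):
+# convert_storage_to_bytes("1Gi") returns 1073741824,
+# convert_storage_to_bytes("512") returns 512.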
+
+def is_valid_digest(digest):
+
+ digest_algorithm_size = dict(
+ sha256=64, sha384=96, sha512=128,
+ )
+
+ # '-' is kept at the end of the character class so it matches a
+ # literal dash instead of forming a range
+ m = re.match(r'[a-zA-Z0-9_+.-]+:[a-fA-F0-9]+', digest)
+ if not m:
+ return "Docker digest does not match expected format %s" % digest
+
+ idx = digest.find(':')
+ # case: "sha256:" with no hex.
+ if idx < 0 or idx == (len(digest) - 1):
+ return "Invalid docker digest %s, no hex value define" % digest
+
+ algorithm = digest[:idx]
+ if algorithm not in digest_algorithm_size:
+ return "Unsupported digest algorithm value %s for digest %s" % (algorithm, digest)
+
+ hex_value = digest[idx + 1:]
+ if len(hex_value) != digest_algorithm_size.get(algorithm):
+ return "Invalid length for digest hex expected %d found %d (digest is %s)" % (
+ digest_algorithm_size.get(algorithm), len(hex_value), digest
+ )
+
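+# Illustrative: is_valid_digest("sha256:" + "a" * 64) returns None (valid),
+# while is_valid_digest("md5:abcd") returns an unsupported-algorithm error.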
+
+def parse_docker_image_ref(image_ref, module=None):
+ """
+ Docker Grammar Reference
+ Reference => name [ ":" tag ] [ "@" digest ]
+ name => [hostname '/'] component ['/' component]*
+ hostname => hostcomponent ['.' hostcomponent]* [':' port-number]
+ hostcomponent => /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
+ port-number => /[0-9]+/
+ component => alpha-numeric [separator alpha-numeric]*
+ alpha-numeric => /[a-z0-9]+/
+ separator => /[_.]|__|[-]*/
+ """
+ idx = image_ref.find("/")
+
+ def _contains_any(src, values):
+ return any(x in src for x in values)
+
+ result = {
+ "tag": None, "digest": None
+ }
+ default_domain = "docker.io"
+ if idx < 0 or (not _contains_any(image_ref[:idx], ":.") and image_ref[:idx] != "localhost"):
+ result["hostname"], remainder = default_domain, image_ref
+ else:
+ result["hostname"], remainder = image_ref[:idx], image_ref[idx + 1:]
+
+ # Parse remainder information
+ idx = remainder.find("@")
+ if idx > 0 and len(remainder) > (idx + 1):
+ # docker image reference with digest
+ component, result["digest"] = remainder[:idx], remainder[idx + 1:]
+ err = is_valid_digest(result["digest"])
+ if err:
+ if module:
+ module.fail_json(msg=err)
+ return None, err
+ else:
+ idx = remainder.find(":")
+ if idx > 0 and len(remainder) > (idx + 1):
+ # docker image reference with tag
+ component, result["tag"] = remainder[:idx], remainder[idx + 1:]
+ else:
+ # name only
+ component = remainder
+ v = component.split("/")
+ namespace = None
+ if len(v) > 1:
+ namespace = v[0]
+ result.update({
+ "namespace": namespace, "name": v[-1]
+ })
+
+ return result, None
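+
+# Illustrative example with a hypothetical reference:
+# parse_docker_image_ref("quay.io/ns/app:v1") returns
+# ({"hostname": "quay.io", "namespace": "ns", "name": "app",
+# "tag": "v1", "digest": None}, None).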
diff --git a/ansible_collections/community/okd/plugins/module_utils/openshift_groups.py b/ansible_collections/community/okd/plugins/module_utils/openshift_groups.py
new file mode 100644
index 000000000..5d1aaadc1
--- /dev/null
+++ b/ansible_collections/community/okd/plugins/module_utils/openshift_groups.py
@@ -0,0 +1,442 @@
+#!/usr/bin/env python
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import traceback
+from datetime import datetime
+
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import missing_required_lib
+
+from ansible_collections.community.okd.plugins.module_utils.openshift_ldap import (
+ validate_ldap_sync_config,
+ ldap_split_host_port,
+ OpenshiftLDAPRFC2307,
+ OpenshiftLDAPActiveDirectory,
+ OpenshiftLDAPAugmentedActiveDirectory
+)
+
+try:
+ import ldap
+ HAS_PYTHON_LDAP = True
+ PYTHON_LDAP_ERROR = None
+except ImportError as e:
+ HAS_PYTHON_LDAP = False
+ PYTHON_LDAP_ERROR = e
+
+from ansible_collections.community.okd.plugins.module_utils.openshift_common import AnsibleOpenshiftModule
+
+try:
+ from kubernetes.dynamic.exceptions import DynamicApiError
+except ImportError as e:
+ pass
+
+
+LDAP_OPENSHIFT_HOST_LABEL = "openshift.io/ldap.host"
+LDAP_OPENSHIFT_URL_ANNOTATION = "openshift.io/ldap.url"
+LDAP_OPENSHIFT_UID_ANNOTATION = "openshift.io/ldap.uid"
+LDAP_OPENSHIFT_SYNCTIME_ANNOTATION = "openshift.io/ldap.sync-time"
+
+
+def connect_to_ldap(module, server_uri, bind_dn=None, bind_pw=None, insecure=True, ca_file=None):
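+ # TLS options are set globally on the ldap module; python-ldap reads
+ # them when the connection is initialized below.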
+ if insecure:
+ ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
+ elif ca_file:
+ ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, ca_file)
+ try:
+ connection = ldap.initialize(server_uri)
+ connection.set_option(ldap.OPT_REFERRALS, 0)
+
+ connection.simple_bind_s(bind_dn, bind_pw)
+ return connection
+ except ldap.LDAPError as e:
+ module.fail_json(msg="Cannot bind to the LDAP server '{0}' due to: {1}".format(server_uri, e))
+
+
+def validate_group_annotation(definition, host_ip):
+ name = definition['metadata']['name']
+ # Validate LDAP URL Annotation
+ annotate_url = definition['metadata'].get('annotations', {}).get(LDAP_OPENSHIFT_URL_ANNOTATION)
+ if host_ip:
+ if not annotate_url:
+ return "group '{0}' marked as having been synced did not have an '{1}' annotation".format(name, LDAP_OPENSHIFT_URL_ANNOTATION)
+ elif annotate_url != host_ip:
+ return "group '{0}' was not synchronized from: '{1}'".format(name, host_ip)
+ # Validate LDAP UID Annotation
+ annotate_uid = definition['metadata'].get('annotations', {}).get(LDAP_OPENSHIFT_UID_ANNOTATION)
+ if not annotate_uid:
+ return "group '{0}' marked as having been synced did not have an '{1}' annotation".format(name, LDAP_OPENSHIFT_UID_ANNOTATION)
+ return None
+
+
+class OpenshiftLDAPGroups(object):
+
+ kind = "Group"
+ version = "user.openshift.io/v1"
+
+ def __init__(self, module):
+ self.module = module
+ self.cache = {}
+ self.__group_api = None
+
+ @property
+ def k8s_group_api(self):
+ if not self.__group_api:
+ params = dict(
+ kind=self.kind,
+ api_version=self.version,
+ fail=True
+ )
+ self.__group_api = self.module.find_resource(**params)
+ return self.__group_api
+
+ def get_group_info(self, return_list=False, **kwargs):
+ params = dict(
+ kind=self.kind,
+ api_version=self.version,
+ )
+ params.update(kwargs)
+ result = self.module.kubernetes_facts(**params)
+ if len(result["resources"]) == 0:
+ return None
+ if len(result["resources"]) == 1 and not return_list:
+ return result["resources"][0]
+ else:
+ return result["resources"]
+
+ def list_groups(self):
+ allow_groups = self.module.params.get("allow_groups")
+ deny_groups = self.module.params.get("deny_groups")
+ name_mapping = self.module.config.get("groupUIDNameMapping")
+
+ if name_mapping and (allow_groups or deny_groups):
+
+ def _map_group_names(groups):
+ return [name_mapping.get(value, value) for value in groups]
+
+ allow_groups = _map_group_names(allow_groups or [])
+ deny_groups = _map_group_names(deny_groups or [])
+
+ host = self.module.host
+ netlocation = self.module.netlocation
+ groups = []
+ if allow_groups:
+ missing = []
+ for grp in allow_groups:
+ if deny_groups and grp in deny_groups:
+ continue
+ resource = self.get_group_info(name=grp)
+ if not resource:
+ missing.append(grp)
+ continue
+ groups.append(resource)
+
+ if missing:
+ self.module.fail_json(
+ msg="The following groups were not found: %s" % ''.join(missing)
+ )
+ else:
+ label_selector = "%s=%s" % (LDAP_OPENSHIFT_HOST_LABEL, host)
+ resources = self.get_group_info(label_selectors=[label_selector], return_list=True)
+ if not resources:
+ return None, "Unable to find Group matching label selector '%s'" % label_selector
+ groups = resources
+ if deny_groups:
+ groups = [item for item in groups if item["metadata"]["name"] not in deny_groups]
+
+ uids = []
+ for grp in groups:
+ err = validate_group_annotation(grp, netlocation)
+ if err and allow_groups:
+ # raise an error for any allow_groups entry that does not match the LDAP sync criteria
+ return None, err
+ group_uid = grp['metadata']['annotations'].get(LDAP_OPENSHIFT_UID_ANNOTATION)
+ self.cache[group_uid] = grp
+ uids.append(group_uid)
+ return uids, None
+
+ def get_group_name_for_uid(self, group_uid):
+ if group_uid not in self.cache:
+ return None, "No mapping found for Group uid: %s" % group_uid
+ return self.cache[group_uid]["metadata"]["name"], None
+
+ def make_openshift_group(self, group_uid, group_name, usernames):
+ group = self.get_group_info(name=group_name)
+ if not group:
+ group = {
+ "apiVersion": "user.openshift.io/v1",
+ "kind": "Group",
+ "metadata": {
+ "name": group_name,
+ "labels": {
+ LDAP_OPENSHIFT_HOST_LABEL: self.module.host
+ },
+ "annotations": {
+ LDAP_OPENSHIFT_URL_ANNOTATION: self.module.netlocation,
+ LDAP_OPENSHIFT_UID_ANNOTATION: group_uid,
+ }
+ }
+ }
+
+ # Make sure we aren't taking over an OpenShift group that is already related to a different LDAP group
+ ldaphost_label = group["metadata"].get("labels", {}).get(LDAP_OPENSHIFT_HOST_LABEL)
+ if not ldaphost_label or ldaphost_label != self.module.host:
+ return None, "Group %s: %s label did not match sync host: wanted %s, got %s" % (
+ group_name, LDAP_OPENSHIFT_HOST_LABEL, self.module.host, ldaphost_label
+ )
+
+ ldapurl_annotation = group["metadata"].get("annotations", {}).get(LDAP_OPENSHIFT_URL_ANNOTATION)
+ if not ldapurl_annotation or ldapurl_annotation != self.module.netlocation:
+ return None, "Group %s: %s annotation did not match sync host: wanted %s, got %s" % (
+ group_name, LDAP_OPENSHIFT_URL_ANNOTATION, self.module.netlocation, ldapurl_annotation
+ )
+
+ ldapuid_annotation = group["metadata"].get("annotations", {}).get(LDAP_OPENSHIFT_UID_ANNOTATION)
+ if not ldapuid_annotation or ldapuid_annotation != group_uid:
+ return None, "Group %s: %s annotation did not match LDAP UID: wanted %s, got %s" % (
+ group_name, LDAP_OPENSHIFT_UID_ANNOTATION, group_uid, ldapuid_annotation
+ )
+
+ # Overwrite Group Users data
+ group["users"] = usernames
+ group["metadata"]["annotations"][LDAP_OPENSHIFT_SYNCTIME_ANNOTATION] = datetime.now().isoformat()
+ return group, None
+
+ def create_openshift_groups(self, groups: list):
+ diffs = []
+ results = []
+ changed = False
+ for definition in groups:
+ name = definition["metadata"]["name"]
+ existing = self.get_group_info(name=name)
+ if not self.module.check_mode:
+ method = "patch" if existing else "create"
+ try:
+ if existing:
+ definition = self.k8s_group_api.patch(definition).to_dict()
+ else:
+ definition = self.k8s_group_api.create(definition).to_dict()
+ except DynamicApiError as exc:
+ self.module.fail_json(msg="Failed to %s Group '%s' due to: %s" % (method, name, exc.body))
+ except Exception as exc:
+ self.module.fail_json(msg="Failed to %s Group '%s' due to: %s" % (method, name, to_native(exc)))
+ equals = False
+ if existing:
+ equals, diff = self.module.diff_objects(existing, definition)
+ diffs.append(diff)
+ changed = changed or not equals
+ results.append(definition)
+ return results, diffs, changed
+
+ def delete_openshift_group(self, name: str):
+ result = dict(
+ kind=self.kind,
+ apiVersion=self.version,
+ metadata=dict(
+ name=name
+ )
+ )
+ if not self.module.check_mode:
+ try:
+ result = self.k8s_group_api.delete(name=name).to_dict()
+ except DynamicApiError as exc:
+ self.module.fail_json(msg="Failed to delete Group '{0}' due to: {1}".format(name, exc.body))
+ except Exception as exc:
+ self.module.fail_json(msg="Failed to delete Group '{0}' due to: {1}".format(name, to_native(exc)))
+ return result
+
+
+class OpenshiftGroupsSync(AnsibleOpenshiftModule):
+
+ def __init__(self, **kwargs):
+
+ super(OpenshiftGroupsSync, self).__init__(**kwargs)
+ self.__k8s_group_api = None
+ self.__ldap_connection = None
+ self.host = None
+ self.port = None
+ self.netlocation = None
+ self.scheme = None
+ self.config = self.params.get("sync_config")
+
+ if not HAS_PYTHON_LDAP:
+ self.fail_json(
+ msg=missing_required_lib('python-ldap'), error=to_native(PYTHON_LDAP_ERROR)
+ )
+
+ @property
+ def k8s_group_api(self):
+ if not self.__k8s_group_api:
+ params = dict(
+ kind="Group",
+ api_version="user.openshift.io/v1",
+ fail=True
+ )
+ self.__k8s_group_api = self.find_resource(**params)
+ return self.__k8s_group_api
+
+ @property
+ def hostIP(self):
+ return self.netlocation
+
+ @property
+ def connection(self):
+ if not self.__ldap_connection:
+ # Create connection object
+ params = dict(
+ module=self,
+ server_uri=self.config.get('url'),
+ bind_dn=self.config.get('bindDN'),
+ bind_pw=self.config.get('bindPassword'),
+ insecure=boolean(self.config.get('insecure')),
+ ca_file=self.config.get('ca')
+ )
+ self.__ldap_connection = connect_to_ldap(**params)
+ return self.__ldap_connection
+
+ def close_connection(self):
+ if self.__ldap_connection:
+ self.__ldap_connection.unbind_s()
+ self.__ldap_connection = None
+
+ def exit_json(self, **kwargs):
+ self.close_connection()
+ self.module.exit_json(**kwargs)
+
+ def fail_json(self, **kwargs):
+ self.close_connection()
+ self.module.fail_json(**kwargs)
+
+ def get_syncer(self):
+ syncer = None
+ if "rfc2307" in self.config:
+ syncer = OpenshiftLDAPRFC2307(self.config, self.connection)
+ elif "activeDirectory" in self.config:
+ syncer = OpenshiftLDAPActiveDirectory(self.config, self.connection)
+ elif "augmentedActiveDirectory" in self.config:
+ syncer = OpenshiftLDAPAugmentedActiveDirectory(self.config, self.connection)
+ else:
+ msg = "No schema-specific config was found, should be one of 'rfc2307', 'activeDirectory', 'augmentedActiveDirectory'"
+ self.fail_json(msg=msg)
+ return syncer
+
+ def synchronize(self):
+
+ sync_group_type = self.module.params.get("type")
+
+ groups_uids = []
+ ldap_openshift_group = OpenshiftLDAPGroups(module=self)
+
+ # Get Synchronize object
+ syncer = self.get_syncer()
+
+ # Determine what to sync : list groups
+ if sync_group_type == "openshift":
+ groups_uids, err = ldap_openshift_group.list_groups()
+ if err:
+ self.fail_json(msg="Failed to list openshift groups", errors=err)
+ else:
+ # List LDAP Group to synchronize
+ groups_uids = self.params.get("allow_groups")
+ if not groups_uids:
+ groups_uids, err = syncer.list_groups()
+ if err:
+ self.module.fail_json(msg=err)
+ deny_groups = self.params.get("deny_groups")
+ if deny_groups:
+ groups_uids = [uid for uid in groups_uids if uid not in deny_groups]
+
+ openshift_groups = []
+ for uid in groups_uids:
+ # Get membership data
+ member_entries, err = syncer.extract_members(uid)
+ if err:
+ self.fail_json(msg=err)
+
+ # Determine usernames for members entries
+ usernames = []
+ for entry in member_entries:
+ name, err = syncer.get_username_for_entry(entry)
+ if err:
+ self.fail_json(
+ msg="Unable to determine username for entry %s: %s" % (entry, err)
+ )
+ if isinstance(name, list):
+ usernames.extend(name)
+ else:
+ usernames.append(name)
+ # Get group name
+ if sync_group_type == "openshift":
+ group_name, err = ldap_openshift_group.get_group_name_for_uid(uid)
+ else:
+ group_name, err = syncer.get_group_name_for_uid(uid)
+ if err:
+ self.fail_json(msg=err)
+
+ # Make Openshift group
+ group, err = ldap_openshift_group.make_openshift_group(uid, group_name, usernames)
+ if err:
+ self.fail_json(msg=err)
+ openshift_groups.append(group)
+
+ # Create Openshift Groups
+ results, diffs, changed = ldap_openshift_group.create_openshift_groups(openshift_groups)
+ self.module.exit_json(changed=changed, groups=results)
+
+ def prune(self):
+ ldap_openshift_group = OpenshiftLDAPGroups(module=self)
+ groups_uids, err = ldap_openshift_group.list_groups()
+ if err:
+ self.fail_json(msg="Failed to list openshift groups", errors=err)
+
+ # Get Synchronize object
+ syncer = self.get_syncer()
+
+ changed = False
+ groups = []
+ for uid in groups_uids:
+ # Check if LDAP group exist
+ exists, err = syncer.is_ldapgroup_exists(uid)
+ if err:
+ msg = "Error determining LDAP group existence for group %s: %s" % (uid, err)
+ self.module.fail_json(msg=msg)
+
+ if exists:
+ continue
+
+ # if the LDAP entry that was previously used to create the group doesn't exist, prune it
+ group_name, err = ldap_openshift_group.get_group_name_for_uid(uid)
+ if err:
+ self.module.fail_json(msg=err)
+
+ # Delete Group
+ result = ldap_openshift_group.delete_openshift_group(group_name)
+ groups.append(result)
+ changed = True
+
+ self.exit_json(changed=changed, groups=groups)
+
+ def execute_module(self):
+ # validate LDAP sync configuration
+ error = validate_ldap_sync_config(self.config)
+ if error:
+ self.fail_json(msg="Invalid LDAP Sync config: %s" % error)
+
+ # Split host/port
+ if self.config.get('url'):
+ result, error = ldap_split_host_port(self.config.get('url'))
+ if error:
+ self.fail_json(msg="Failed to parse url='{0}': {1}".format(self.config.get('url'), error))
+ self.netlocation, self.host, self.port = result["netlocation"], result["host"], result["port"]
+ self.scheme = result["scheme"]
+
+ if self.params.get('state') == 'present':
+ self.synchronize()
+ else:
+ self.prune()
diff --git a/ansible_collections/community/okd/plugins/module_utils/openshift_images_common.py b/ansible_collections/community/okd/plugins/module_utils/openshift_images_common.py
new file mode 100644
index 000000000..67d7123e8
--- /dev/null
+++ b/ansible_collections/community/okd/plugins/module_utils/openshift_images_common.py
@@ -0,0 +1,226 @@
+#!/usr/bin/env python
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from datetime import datetime
+from ansible_collections.community.okd.plugins.module_utils.openshift_docker_image import (
+ parse_docker_image_ref,
+)
+from ansible.module_utils.six import iteritems
+
+
+def get_image_blobs(image):
+ blobs = [layer["image"] for layer in image["dockerImageLayers"] if "image" in layer]
+ docker_image_metadata = image.get("dockerImageMetadata")
+ if not docker_image_metadata:
+ return blobs, "failed to read metadata for image %s" % image["metadata"]["name"]
+ media_type_manifest = (
+ "application/vnd.docker.distribution.manifest.v2+json",
+ "application/vnd.oci.image.manifest.v1+json"
+ )
+ media_type_has_config = image.get('dockerImageManifestMediaType') in media_type_manifest
+ docker_image_id = docker_image_metadata.get("Id")
+ if media_type_has_config and docker_image_id and len(docker_image_id) > 0:
+ blobs.append(docker_image_id)
+ return blobs, None
+
+
+def is_created_after(creation_timestamp, max_creation_timestamp):
+ if not max_creation_timestamp:
+ return False
+ creationTimestamp = datetime.strptime(creation_timestamp, '%Y-%m-%dT%H:%M:%SZ')
+ return creationTimestamp > max_creation_timestamp
+
+
+def is_too_young_object(obj, max_creation_timestamp):
+ return is_created_after(obj['metadata']['creationTimestamp'],
+ max_creation_timestamp)
+
+
+class OpenShiftAnalyzeImageStream(object):
+
+ def __init__(self, ignore_invalid_refs, max_creation_timestamp, module):
+
+ self.max_creationTimestamp = max_creation_timestamp
+ self.used_tags = {}
+ self.used_images = {}
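+ # both dicts map "namespace/name:tag" (tags) or "namespace/name@digest"
+ # (images) keys to lists of referrer descriptors (kind/namespace/name)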
+ self.ignore_invalid_refs = ignore_invalid_refs
+ self.module = module
+
+ def analyze_reference_image(self, image, referrer):
+ result, error = parse_docker_image_ref(image, self.module)
+ if error:
+ return error
+
+ if not result['hostname'] or not result['namespace']:
+ # image reference does not match hostname/namespace/name pattern - skipping
+ return None
+
+ if not result['digest']:
+ # Attempt to dereference istag. Since we cannot be sure whether the reference refers to the
+ # integrated registry or not, we ignore the host part completely. As a consequence, we may keep
+ # image otherwise sentenced for a removal just because its pull spec accidentally matches one of
+ # our imagestreamtags.
+
+ # default the tag when none was parsed (parse_docker_image_ref
+ # leaves it as None in that case)
+ if not result['tag']:
+ result['tag'] = 'latest'
+ key = "%s/%s:%s" % (result['namespace'], result['name'], result['tag'])
+ if key not in self.used_tags:
+ self.used_tags[key] = []
+ self.used_tags[key].append(referrer)
+ else:
+ key = "%s/%s@%s" % (result['namespace'], result['name'], result['digest'])
+ if key not in self.used_images:
+ self.used_images[key] = []
+ self.used_images[key].append(referrer)
+
+ def analyze_refs_from_pod_spec(self, podSpec, referrer):
+ for container in podSpec.get('initContainers', []) + podSpec.get('containers', []):
+ image = container.get('image', '')
+ if len(image.strip()) == 0:
+ # ignore containers that carry no image reference
+ continue
+ err = self.analyze_reference_image(image, referrer)
+ if err:
+ return err
+ return None
+
+ def analyze_refs_from_pods(self, pods):
+ for pod in pods:
+ # A pod is only *excluded* from being added to the graph if its phase is not
+ # pending or running. Additionally, it has to be at least as old as the minimum
+ # age threshold defined by the algorithm.
+ too_young = is_too_young_object(pod, self.max_creationTimestamp)
+ if pod['status']['phase'] not in ("Running", "Pending") and not too_young:
+ # terminated long enough ago that it no longer protects its images
+ continue
+ referrer = {
+ "kind": pod["kind"],
+ "namespace": pod["metadata"]["namespace"],
+ "name": pod["metadata"]["name"],
+ }
+ err = self.analyze_refs_from_pod_spec(pod['spec'], referrer)
+ if err:
+ return err
+ return None
+
+ def analyze_refs_pod_creators(self, resources):
+ keys = (
+ "ReplicationController", "DeploymentConfig", "DaemonSet",
+ "Deployment", "ReplicaSet", "StatefulSet", "Job", "CronJob"
+ )
+
+ for k, objects in iteritems(resources):
+ if k not in keys:
+ continue
+ for obj in objects:
+ if k == 'CronJob':
+ spec = obj["spec"]["jobTemplate"]["spec"]["template"]["spec"]
+ else:
+ spec = obj["spec"]["template"]["spec"]
+ referrer = {
+ "kind": obj["kind"],
+ "namespace": obj["metadata"]["namespace"],
+ "name": obj["metadata"]["name"],
+ }
+ err = self.analyze_refs_from_pod_spec(spec, referrer)
+ if err:
+ return err
+ return None
+
+ def analyze_refs_from_strategy(self, build_strategy, namespace, referrer):
+ # Determine 'from' reference
+ def _determine_source_strategy():
+ for src in ('sourceStrategy', 'dockerStrategy', 'customStrategy'):
+ strategy = build_strategy.get(src)
+ if strategy:
+ return strategy.get('from')
+ return None
+
+ def _parse_image_stream_image_name(name):
+ v = name.split('@')
+ if len(v) != 2:
+ return None, None, "expected exactly one @ in the isimage name %s" % name
+ name = v[0]
+ tag = v[1]
+ if len(name) == 0 or len(tag) == 0:
+ return None, None, "image stream image name %s must have a name and ID" % name
+ return name, tag, None
+
+ def _parse_image_stream_tag_name(name):
+ if "@" in name:
+ return None, None, "%s is an image stream image, not an image stream tag" % name
+ v = name.split(":")
+ if len(v) != 2:
+ return None, None, "expected exactly one : delimiter in the istag %s" % name
+ name = v[0]
+ tag = v[1]
+ if len(name) == 0 or len(tag) == 0:
+ return None, None, "image stream tag name %s must have a name and a tag" % name
+ return name, tag, None
+
+ from_strategy = _determine_source_strategy()
+ if from_strategy:
+ if from_strategy.get('kind') == "DockerImage":
+ docker_image_ref = from_strategy.get('name').strip()
+ if len(docker_image_ref) > 0:
+ err = self.analyze_reference_image(docker_image_ref, referrer)
+ if err:
+ return err
+ elif from_strategy.get('kind') == "ImageStreamImage":
+ name, tag, error = _parse_image_stream_image_name(from_strategy.get('name'))
+ if error:
+ if not self.ignore_invalid_refs:
+ return error
+ else:
+ namespace = from_strategy.get('namespace') or namespace
+ # used_images maps "namespace/name@digest" keys to referrer lists
+ key = "%s/%s@%s" % (namespace, name, tag)
+ self.used_images.setdefault(key, []).append(referrer)
+ elif from_strategy.get('kind') == "ImageStreamTag":
+ name, tag, error = _parse_image_stream_tag_name(from_strategy.get('name'))
+ if error:
+ if not self.ignore_invalid_refs:
+ return error
+ else:
+ namespace = from_strategy.get('namespace') or namespace
+ # used_tags maps "namespace/name:tag" keys to referrer lists
+ key = "%s/%s:%s" % (namespace, name, tag)
+ self.used_tags.setdefault(key, []).append(referrer)
+
+ def analyze_refs_from_build_strategy(self, resources):
+ # Json Path is always spec.strategy
+ keys = ("BuildConfig", "Build")
+ for k, objects in iteritems(resources):
+ if k not in keys:
+ continue
+ for obj in objects:
+ referrer = {
+ "kind": obj["kind"],
+ "namespace": obj["metadata"]["namespace"],
+ "name": obj["metadata"]["name"],
+ }
+ error = self.analyze_refs_from_strategy(obj['spec']['strategy'],
+ obj['metadata']['namespace'],
+ referrer)
+ if error is not None:
+ return "%s/%s/%s: %s" % (referrer["kind"], referrer["namespace"], referrer["name"], error)
+
+ def analyze_image_stream(self, resources):
+
+ # Analyze image reference from Pods
+ error = self.analyze_refs_from_pods(resources['Pod'])
+ if error:
+ return None, None, error
+
+ # Analyze image reference from Resources creating Pod
+ error = self.analyze_refs_pod_creators(resources)
+ if error:
+ return None, None, error
+
+ # Analyze image reference from Build/BuildConfig
+ error = self.analyze_refs_from_build_strategy(resources)
+ return self.used_tags, self.used_images, error
diff --git a/ansible_collections/community/okd/plugins/module_utils/openshift_import_image.py b/ansible_collections/community/okd/plugins/module_utils/openshift_import_image.py
new file mode 100644
index 000000000..01bba82af
--- /dev/null
+++ b/ansible_collections/community/okd/plugins/module_utils/openshift_import_image.py
@@ -0,0 +1,390 @@
+#!/usr/bin/env python
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import traceback
+import copy
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.module_utils.six import string_types
+
+from ansible_collections.community.okd.plugins.module_utils.openshift_common import AnsibleOpenshiftModule
+
+try:
+ from kubernetes.dynamic.exceptions import DynamicApiError
+except ImportError:
+ pass
+
+from ansible_collections.community.okd.plugins.module_utils.openshift_docker_image import (
+ parse_docker_image_ref,
+)
+
+err_stream_not_found_ref = "NotFound reference"
+
+
+def follow_imagestream_tag_reference(stream, tag):
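+ # Follows ImageStreamTag references within the stream and returns a
+ # (tag, tag_reference, multiple, error) tuple; multiple indicates that
+ # more than one reference hop was followed.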
+ multiple = False
+
+ def _imagestream_has_tag():
+ for ref in stream["spec"].get("tags", []):
+ if ref["name"] == tag:
+ return ref
+ return None
+
+ def _imagestream_split_tag(name):
+ parts = name.split(":")
+ name = parts[0]
+ tag = ""
+ if len(parts) > 1:
+ tag = parts[1]
+ if len(tag) == 0:
+ tag = "latest"
+ return name, tag, len(parts) == 2
+
+ content = []
+ err_cross_stream_ref = "tag %s points to an imagestreamtag from another ImageStream" % tag
+ while True:
+ if tag in content:
+ return tag, None, multiple, "tag %s on the image stream is a reference to same tag" % tag
+ content.append(tag)
+ tag_ref = _imagestream_has_tag()
+ if not tag_ref:
+ return None, None, multiple, err_stream_not_found_ref
+
+ if not tag_ref.get("from") or tag_ref["from"]["kind"] != "ImageStreamTag":
+ return tag, tag_ref, multiple, None
+
+ if tag_ref["from"]["namespace"] != "" and tag_ref["from"]["namespace"] != stream["metadata"]["namespace"]:
+ return tag, None, multiple, err_cross_stream_ref
+
+ # The reference needs to be followed with two format patterns:
+ # a) sameis:sometag and b) sometag
+ if ":" in tag_ref["from"]["name"]:
+ name, tagref, result = _imagestream_split_tag(tag_ref["from"]["name"])
+ if not result:
+ return tag, None, multiple, "tag %s points to an invalid imagestreamtag" % tag
+ if name != stream["metadata"]["namespace"]:
+ # anotheris:sometag - this should not happen.
+ return tag, None, multiple, err_cross_stream_ref
+ # sameis:sometag - follow the reference as sometag
+ tag = tagref
+ else:
+ tag = tag_ref["from"]["name"]
+ multiple = True
+
+
+class OpenShiftImportImage(AnsibleOpenshiftModule):
+ def __init__(self, **kwargs):
+ super(OpenShiftImportImage, self).__init__(**kwargs)
+
+ self._rest_client = None
+ self.registryhost = self.params.get('registry_url')
+ self.changed = False
+
+ ref_policy = self.params.get("reference_policy")
+ ref_policy_type = None
+ if ref_policy == "source":
+ ref_policy_type = "Source"
+ elif ref_policy == "local":
+ ref_policy_type = "Local"
+
+ self.ref_policy = {
+ "type": ref_policy_type
+ }
+
+ self.validate_certs = self.params.get("validate_registry_certs")
+ self.cluster_resources = {}
+
+ def create_image_stream_import(self, stream):
+ isi = {
+ "apiVersion": "image.openshift.io/v1",
+ "kind": "ImageStreamImport",
+ "metadata": {
+ "name": stream["metadata"]["name"],
+ "namespace": stream["metadata"]["namespace"],
+ "resourceVersion": stream["metadata"].get("resourceVersion")
+ },
+ "spec": {
+ "import": True
+ }
+ }
+
+ annotations = stream.get("annotations", {})
+ insecure = boolean(annotations.get("openshift.io/image.insecureRepository", True))
+ if self.validate_certs is not None:
+ insecure = not self.validate_certs
+ return isi, insecure
+
+ def create_image_stream_import_all(self, stream, source):
+ isi, insecure = self.create_image_stream_import(stream)
+ isi["spec"]["repository"] = {
+ "from": {
+ "kind": "DockerImage",
+ "name": source,
+ },
+ "importPolicy": {
+ "insecure": insecure,
+ "scheduled": self.params.get("scheduled")
+ },
+ "referencePolicy": self.ref_policy,
+ }
+ return isi
+
+ def create_image_stream_import_tags(self, stream, tags):
+ isi, streamInsecure = self.create_image_stream_import(stream)
+ for k in tags:
+ insecure = streamInsecure
+ scheduled = self.params.get("scheduled")
+
+ old_tag = None
+ for t in stream.get("spec", {}).get("tags", []):
+ if t["name"] == k:
+ old_tag = t
+ break
+
+ if old_tag:
+ insecure = insecure or old_tag["importPolicy"].get("insecure")
+ scheduled = scheduled or old_tag["importPolicy"].get("scheduled")
+
+ images = isi["spec"].get("images", [])
+ images.append({
+ "from": {
+ "kind": "DockerImage",
+ "name": tags.get(k),
+ },
+ "to": {
+ "name": k
+ },
+ "importPolicy": {
+ "insecure": insecure,
+ "scheduled": scheduled
+ },
+ "referencePolicy": self.ref_policy,
+ })
+ isi["spec"]["images"] = images
+ return isi
+
+ def create_image_stream(self, ref):
+ """
+ Create new ImageStream and accompanying ImageStreamImport
+ """
+ source = self.params.get("source")
+ if not source:
+ source = ref["source"]
+
+ stream = dict(
+ apiVersion="image.openshift.io/v1",
+ kind="ImageStream",
+ metadata=dict(
+ name=ref["name"],
+ namespace=self.params.get("namespace"),
+ ),
+ )
+ if self.params.get("all") and not ref["tag"]:
+ spec = dict(
+ dockerImageRepository=source
+ )
+ isi = self.create_image_stream_import_all(stream, source)
+ else:
+ spec = dict(
+ tags=[
+ {
+ "from": {
+ "kind": "DockerImage",
+ "name": source
+ },
+ "referencePolicy": self.ref_policy
+ }
+ ]
+ )
+ tags = {ref["tag"]: source}
+ isi = self.create_image_stream_import_tags(stream, tags)
+ stream.update(
+ dict(spec=spec)
+ )
+ return stream, isi
+
+ def import_all(self, istream):
+ stream = copy.deepcopy(istream)
+ # Update ImageStream appropriately
+ source = self.params.get("source")
+ docker_image_repo = stream["spec"].get("dockerImageRepository")
+ if not source:
+ if docker_image_repo:
+ source = docker_image_repo
+ else:
+ tags = {}
+ for t in stream["spec"].get("tags", []):
+ if t.get("from") and t["from"].get("kind") == "DockerImage":
+ tags[t.get("name")] = t["from"].get("name")
+ if tags == {}:
+ msg = "image stream %s/%s does not have tags pointing to external container images" % (
+ stream["metadata"]["namespace"], stream["metadata"]["name"]
+ )
+ self.fail_json(msg=msg)
+ isi = self.create_image_stream_import_tags(stream, tags)
+ return stream, isi
+
+ if source != docker_image_repo:
+ stream["spec"]["dockerImageRepository"] = source
+ isi = self.create_image_stream_import_all(stream, source)
+ return stream, isi
+
+ def import_tag(self, stream, tag):
+ source = self.params.get("source")
+
+ # Follow any referential tags to the destination
+ final_tag, existing, multiple, err = follow_imagestream_tag_reference(stream, tag)
+ if err:
+ if err == err_stream_not_found_ref:
+ # Create a new tag
+ if not source and tag == "latest":
+ source = stream["spec"].get("dockerImageRepository")
+ # if the source is still empty, there is no such tag defined
+ # nor can one be derived from .spec.dockerImageRepository
+ if not source:
+ msg = "the tag %s does not exist on the image stream - choose an existing tag to import" % tag
+ self.fail_json(msg=msg)
+ existing = {
+ "from": {
+ "kind": "DockerImage",
+ "name": source,
+ }
+ }
+ else:
+ self.fail_json(msg=err)
+ else:
+ # Disallow re-importing anything other than DockerImage
+ if existing.get("from", {}) and existing["from"].get("kind") != "DockerImage":
+ msg = "tag {tag} points to existing {kind}/{name}, it cannot be re-imported.".format(
+ tag=tag, kind=existing["from"]["kind"], name=existing["from"]["name"]
+ )
+ self.fail_json(msg=msg)
+ # disallow changing an existing tag
+ if not existing.get("from", {}):
+ msg = "tag %s already exists - you cannot change the source using this module." % tag
+ self.fail_json(msg=msg)
+ if source and source != existing["from"]["name"]:
+ if multiple:
+ msg = "the tag {0} points to the tag {1} which points to {2} you cannot change the source using this module".format(
+ tag, final_tag, existing["from"]["name"]
+ )
+ else:
+ msg = "the tag %s points to %s you cannot change the source using this module." % (tag, final_tag)
+ self.fail_json(msg=msg)
+
+ # Set the target item to import
+ source = existing["from"].get("name")
+ if multiple:
+ tag = final_tag
+
+ # Clear the legacy annotation
+ tag_to_delete = "openshift.io/image.dockerRepositoryCheck"
+ if existing["annotations"] and tag_to_delete in existing["annotations"]:
+ del existing["annotations"][tag_to_delete]
+
+ # Reset the generation
+ existing["generation"] = 0
+
+ new_stream = copy.deepcopy(stream)
+ new_stream["spec"]["tags"] = []
+ for t in stream["spec"]["tags"]:
+ if t["name"] == tag:
+ new_stream["spec"]["tags"].append(existing)
+ else:
+ new_stream["spec"]["tags"].append(t)
+
+ # Create accompanying ImageStreamImport
+ tags = {tag: source}
+ isi = self.create_image_stream_import_tags(new_stream, tags)
+ return new_stream, isi
+
+ def create_image_import(self, ref):
+ kind = "ImageStream"
+ api_version = "image.openshift.io/v1"
+
+ # Find existing Image Stream
+ params = dict(
+ kind=kind,
+ api_version=api_version,
+ name=ref.get("name"),
+ namespace=self.params.get("namespace")
+ )
+ result = self.kubernetes_facts(**params)
+ if not result["api_found"]:
+ msg = 'Failed to find API for resource with apiVersion "{0}" and kind "{1}"'.format(
+ api_version, kind
+ )
+ self.fail_json(msg=msg)
+ imagestream = None
+ if len(result["resources"]) > 0:
+ imagestream = result["resources"][0]
+
+ stream, isi = None, None
+ if not imagestream:
+ stream, isi = self.create_image_stream(ref)
+ elif self.params.get("all") and not ref["tag"]:
+ # importing the entire repository
+ stream, isi = self.import_all(imagestream)
+ else:
+ # importing a single tag
+ stream, isi = self.import_tag(imagestream, ref["tag"])
+ return isi
+
+ def parse_image_reference(self, image_ref):
+ result, err = parse_docker_image_ref(image_ref, self.module)
+ if result.get("digest"):
+ self.fail_json(msg="Cannot import by ID, error with definition: %s" % image_ref)
+ tag = result.get("tag") or None
+ if not self.params.get("all") and not tag:
+ tag = "latest"
+ source = self.params.get("source")
+ if not source:
+ source = image_ref
+ return dict(name=result.get("name"), tag=tag, source=image_ref)
+
+ def execute_module(self):
+
+ names = []
+ name = self.params.get("name")
+ if isinstance(name, string_types):
+ names.append(name)
+ elif isinstance(name, list):
+ names = name
+ else:
+ self.fail_json(msg="Parameter name should be provided as list or string.")
+
+ images_refs = [self.parse_image_reference(x) for x in names]
+ images_imports = []
+ for ref in images_refs:
+ isi = self.create_image_import(ref)
+ images_imports.append(isi)
+
+ # Create image import
+ kind = "ImageStreamImport"
+ api_version = "image.openshift.io/v1"
+ namespace = self.params.get("namespace")
+ try:
+ resource = self.find_resource(kind=kind, api_version=api_version, fail=True)
+ result = []
+ for isi in images_imports:
+ if not self.check_mode:
+ isi = resource.create(isi, namespace=namespace).to_dict()
+ result.append(isi)
+ self.exit_json(changed=True, result=result)
+ except DynamicApiError as exc:
+ msg = "Failed to create object {kind}/{namespace}/{name} due to: {error}".format(
+ kind=kind, namespace=namespace, name=isi["metadata"]["name"], error=exc
+ )
+ self.fail_json(
+ msg=msg,
+ error=exc.status,
+ status=exc.status,
+ reason=exc.reason,
+ )
+ except Exception as exc:
+ msg = "Failed to create object {kind}/{namespace}/{name} due to: {error}".format(
+ kind=kind, namespace=namespace, name=isi["metadata"]["name"], error=exc
+ )
+ self.fail_json(msg=msg)
diff --git a/ansible_collections/community/okd/plugins/module_utils/openshift_ldap.py b/ansible_collections/community/okd/plugins/module_utils/openshift_ldap.py
new file mode 100644
index 000000000..bb9229a72
--- /dev/null
+++ b/ansible_collections/community/okd/plugins/module_utils/openshift_ldap.py
@@ -0,0 +1,777 @@
+#!/usr/bin/env python
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import os
+import copy
+
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.module_utils.six import iteritems
+
+try:
+ import ldap
+except ImportError as e:
+ pass
+
+
+LDAP_SEARCH_OUT_OF_SCOPE_ERROR = "trying to search by DN for an entry that exists outside of the tree specified with the BaseDN for search"
+
+
+def validate_ldap_sync_config(config):
+ # Validate url
+ url = config.get('url')
+ if not url:
+ return "url should be non empty attribute."
+
+ # Make sure bindDN and bindPassword are both set, or both unset
+ bind_dn = config.get('bindDN', "")
+ bind_password = config.get('bindPassword', "")
+ if (len(bind_dn) == 0) != (len(bind_password) == 0):
+ return "bindDN and bindPassword must both be specified, or both be empty."
+
+ insecure = boolean(config.get('insecure'))
+ ca_file = config.get('ca')
+ if insecure:
+ if url.startswith('ldaps://'):
+ return "Cannot use ldaps scheme with insecure=true."
+ if ca_file:
+ return "Cannot specify a ca with insecure=true."
+ elif ca_file and not os.path.isfile(ca_file):
+ return "could not read ca file: {0}.".format(ca_file)
+
+ nameMapping = config.get('groupUIDNameMapping', {})
+ for k, v in iteritems(nameMapping):
+ if len(k) == 0 or len(v) == 0:
+ return "groupUIDNameMapping has empty key or value"
+
+ schemas = []
+ schema_list = ('rfc2307', 'activeDirectory', 'augmentedActiveDirectory')
+ for schema in schema_list:
+ if schema in config:
+ schemas.append(schema)
+
+ if len(schemas) == 0:
+ return "No schema-specific config was provided, should be one of %s" % ", ".join(schema_list)
+ if len(schemas) > 1:
+ return "Exactly one schema-specific config is required; found (%d) %s" % (len(schemas), ','.join(schemas))
+
+ if schemas[0] == 'rfc2307':
+ return validate_RFC2307(config.get("rfc2307"))
+ elif schemas[0] == 'activeDirectory':
+ return validate_ActiveDirectory(config.get("activeDirectory"))
+ elif schemas[0] == 'augmentedActiveDirectory':
+ return validate_AugmentedActiveDirectory(config.get("augmentedActiveDirectory"))
+
+
+def validate_ldap_query(qry, isDNOnly=False):
+
+ # validate query scope
+ scope = qry.get('scope')
+ if scope and scope not in ("", "sub", "one", "base"):
+ return "invalid scope %s" % scope
+
+ # validate deref aliases
+ derefAlias = qry.get('derefAliases')
+ if derefAlias and derefAlias not in ("never", "search", "base", "always"):
+ return "not a valid LDAP alias dereferncing behavior: %s", derefAlias
+
+ # validate timeout
+ timeout = qry.get('timeout')
+ if timeout and float(timeout) < 0:
+ return "timeout must be equal to or greater than zero"
+
+ # Validate DN only
+ qry_filter = qry.get('filter', "")
+ if isDNOnly:
+ if len(qry_filter) > 0:
+ return 'cannot specify a filter when using "dn" as the UID attribute'
+ else:
+ # validate filter
+ if len(qry_filter) == 0 or qry_filter[0] != '(':
+ return "filter does not start with an '('"
+ return None
+
+
+def validate_RFC2307(config):
+ qry = config.get('groupsQuery')
+ if not qry or not isinstance(qry, dict):
+ return "RFC2307: groupsQuery requires a dictionary"
+ error = validate_ldap_query(qry)
+ if error:
+ return error
+ for field in ('groupUIDAttribute', 'groupNameAttributes', 'groupMembershipAttributes',
+ 'userUIDAttribute', 'userNameAttributes'):
+ value = config.get(field)
+ if not value:
+ return "RFC2307: {0} is required.".format(field)
+
+ users_qry = config.get('usersQuery')
+ if not users_qry or not isinstance(users_qry, dict):
+ return "RFC2307: usersQuery requires a dictionary"
+
+ isUserDNOnly = (config.get('userUIDAttribute').strip() == 'dn')
+ return validate_ldap_query(users_qry, isDNOnly=isUserDNOnly)
+
+
+def validate_ActiveDirectory(config, label="ActiveDirectory"):
+ users_qry = config.get('usersQuery')
+ if not users_qry or not isinstance(users_qry, dict):
+ return "{0}: usersQuery requires as dictionnary".format(label)
+ error = validate_ldap_query(users_qry)
+ if error:
+ return error
+
+ for field in ('userNameAttributes', 'groupMembershipAttributes'):
+ value = config.get(field)
+ if not value:
+ return "{0}: {1} is required.".format(field, label)
+
+ return None
+
+
+def validate_AugmentedActiveDirectory(config):
+ error = validate_ActiveDirectory(config, label="AugmentedActiveDirectory")
+ if error:
+ return error
+ for field in ('groupUIDAttribute', 'groupNameAttributes'):
+ value = config.get(field)
+ if not value:
+ return "AugmentedActiveDirectory: {0} is required".format(field)
+ groups_qry = config.get('groupsQuery')
+ if not groups_qry or not isinstance(groups_qry, dict):
+        return "AugmentedActiveDirectory: groupsQuery requires a dictionary."
+
+ isGroupDNOnly = (config.get('groupUIDAttribute').strip() == 'dn')
+ return validate_ldap_query(groups_qry, isDNOnly=isGroupDNOnly)
+
+
+def determine_ldap_scope(scope):
+ if scope in ("", "sub"):
+ return ldap.SCOPE_SUBTREE
+ elif scope == 'base':
+ return ldap.SCOPE_BASE
+ elif scope == 'one':
+ return ldap.SCOPE_ONELEVEL
+ return None
+
+
+def determine_deref_aliases(derefAlias):
+ mapping = {
+ "never": ldap.DEREF_NEVER,
+ "search": ldap.DEREF_SEARCHING,
+ "base": ldap.DEREF_FINDING,
+ "always": ldap.DEREF_ALWAYS,
+ }
+ result = None
+ if derefAlias in mapping:
+ result = mapping.get(derefAlias)
+ return result
+
+
+def openshift_ldap_build_base_query(config):
+ qry = {}
+ if config.get('baseDN'):
+ qry['base'] = config.get('baseDN')
+
+ scope = determine_ldap_scope(config.get('scope'))
+ if scope:
+ qry['scope'] = scope
+
+ pageSize = config.get('pageSize')
+ if pageSize and int(pageSize) > 0:
+ qry['sizelimit'] = int(pageSize)
+
+ timeout = config.get('timeout')
+ if timeout and int(timeout) > 0:
+ qry['timeout'] = int(timeout)
+
+ filter = config.get('filter')
+ if filter:
+ qry['filterstr'] = filter
+
+ derefAlias = determine_deref_aliases(config.get('derefAliases'))
+ if derefAlias:
+ qry['derefAlias'] = derefAlias
+ return qry
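+
+
+# Illustrative sketch (assumed example values): a groupsQuery config such as
+#   {"baseDN": "ou=groups,dc=example,dc=org", "scope": "sub",
+#    "filter": "(objectClass=groupOfNames)", "timeout": 30}
+# is turned by openshift_ldap_build_base_query() into keyword arguments for
+# python-ldap's search_ext_s():
+#   {"base": "ou=groups,dc=example,dc=org", "scope": ldap.SCOPE_SUBTREE,
+#    "filterstr": "(objectClass=groupOfNames)", "timeout": 30}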
+
+
+def openshift_ldap_get_attribute_for_entry(entry, attribute):
+ attributes = [attribute]
+ if isinstance(attribute, list):
+ attributes = attribute
+ for k in attributes:
+ if k.lower() == 'dn':
+ return entry[0]
+ v = entry[1].get(k, None)
+ if v:
+ if isinstance(v, list):
+ result = []
+ for x in v:
+ if hasattr(x, 'decode'):
+ result.append(x.decode('utf-8'))
+ else:
+ result.append(x)
+ return result
+ else:
+ return v.decode('utf-8') if hasattr(v, 'decode') else v
+ return ""
+
+
+def ldap_split_host_port(hostport):
+ """
+ ldap_split_host_port splits a network address of the form "host:port",
+ "host%zone:port", "[host]:port" or "[host%zone]:port" into host or
+ host%zone and port.
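+
+    Illustrative (assumed example input):
+        ldap_split_host_port("ldaps://ldap.example.com:636") returns
+        ({'scheme': 'ldaps', 'netlocation': 'ldap.example.com:636',
+          'host': 'ldap.example.com', 'port': 636}, None)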
+ """
+ result = dict(
+ scheme=None, netlocation=None, host=None, port=None
+ )
+ if not hostport:
+ return result, None
+
+ # Extract Scheme
+ netlocation = hostport
+ scheme_l = "://"
+ if "://" in hostport:
+ idx = hostport.find(scheme_l)
+ result["scheme"] = hostport[:idx]
+ netlocation = hostport[idx + len(scheme_l):]
+ result["netlocation"] = netlocation
+
+    if netlocation[-1] == ']':
+        # ipv6 literal (with no port); nothing left to split
+        result["host"] = netlocation
+        return result, None
+
+ v = netlocation.rsplit(":", 1)
+ if len(v) != 1:
+ try:
+ result["port"] = int(v[1])
+ except ValueError:
+ return None, "Invalid value specified for port: %s" % v[1]
+ result["host"] = v[0]
+ return result, None
+
+
+def openshift_ldap_query_for_entries(connection, qry, unique_entry=True):
+ # set deref alias (TODO: need to set a default value to reset for each transaction)
+ derefAlias = qry.pop('derefAlias', None)
+ if derefAlias:
+ ldap.set_option(ldap.OPT_DEREF, derefAlias)
+ try:
+ result = connection.search_ext_s(**qry)
+ if not result or len(result) == 0:
+ return None, "Entry not found for base='{0}' and filter='{1}'".format(qry['base'], qry['filterstr'])
+ if len(result) > 1 and unique_entry:
+ if qry.get('scope') == ldap.SCOPE_BASE:
+ return None, "multiple entries found matching dn={0}: {1}".format(qry['base'], result)
+ else:
+ return None, "multiple entries found matching filter {0}: {1}".format(qry['filterstr'], result)
+ return result, None
+ except ldap.NO_SUCH_OBJECT:
+ return None, "search for entry with base dn='{0}' refers to a non-existent entry".format(qry['base'])
+
+
+def openshift_equal_dn_objects(dn_obj, other_dn_obj):
+ if len(dn_obj) != len(other_dn_obj):
+ return False
+
+ for k, v in enumerate(dn_obj):
+ if len(v) != len(other_dn_obj[k]):
+ return False
+ for j, item in enumerate(v):
+ if not (item == other_dn_obj[k][j]):
+ return False
+ return True
+
+
+def openshift_equal_dn(dn, other):
+ dn_obj = ldap.dn.str2dn(dn)
+ other_dn_obj = ldap.dn.str2dn(other)
+
+ return openshift_equal_dn_objects(dn_obj, other_dn_obj)
+
+
+def openshift_ancestorof_dn(dn, other):
+ dn_obj = ldap.dn.str2dn(dn)
+ other_dn_obj = ldap.dn.str2dn(other)
+
+ if len(dn_obj) >= len(other_dn_obj):
+ return False
+ # Take the last attribute from the other DN to compare against
+ return openshift_equal_dn_objects(dn_obj, other_dn_obj[len(other_dn_obj) - len(dn_obj):])
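+
+
+# Illustrative (assumed example DNs): "ou=groups,dc=example,dc=org" is an
+# ancestor of "cn=admins,ou=groups,dc=example,dc=org", so
+# openshift_ancestorof_dn() returns True for that pair and False when the
+# two DNs are equal.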
+
+
+class OpenshiftLDAPQueryOnAttribute(object):
+ def __init__(self, qry, attribute):
+ # qry retrieves entries from an LDAP server
+ self.qry = copy.deepcopy(qry)
+        # query_attribute is the attribute for a specific filter that, when conjoined with the common filter,
+ # retrieves the specific LDAP entry from the LDAP server. (e.g. "cn", when formatted with "aGroupName"
+ # and conjoined with "objectClass=groupOfNames", becomes (&(objectClass=groupOfNames)(cn=aGroupName))")
+ self.query_attribute = attribute
+
+ @staticmethod
+ def escape_filter(buffer):
+ """
+        Escapes the special characters '(', ')', '*', '\\' and those outside
+        the range 0 < c < 0x80 from the provided LDAP filter string, as
+        defined in RFC 4515.
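+
+        Illustrative (assumed escaping): escape_filter("a*(b)") returns
+        the string a\\2a\\28b\\29.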
+ """
+ output = []
+ hex_string = "0123456789abcdef"
+ for c in buffer:
+            if ord(c) > 0x7f or ord(c) == 0 or c in ('(', ')', '\\', '*'):
+ first = ord(c) >> 4
+ second = ord(c) & 0xf
+ output += ['\\', hex_string[first], hex_string[second]]
+ else:
+ output.append(c)
+ return ''.join(output)
+
+ def build_request(self, ldapuid, attributes):
+ params = copy.deepcopy(self.qry)
+ if self.query_attribute.lower() == 'dn':
+ if ldapuid:
+ if not openshift_equal_dn(ldapuid, params['base']) and not openshift_ancestorof_dn(params['base'], ldapuid):
+ return None, LDAP_SEARCH_OUT_OF_SCOPE_ERROR
+ params['base'] = ldapuid
+ params['scope'] = ldap.SCOPE_BASE
+ # filter that returns all values
+ params['filterstr'] = "(objectClass=*)"
+ params['attrlist'] = attributes
+ else:
+ # Builds the query containing a filter that conjoins the common filter given
+ # in the configuration with the specific attribute filter for which the attribute value is given
+ specificFilter = "%s=%s" % (self.escape_filter(self.query_attribute), self.escape_filter(ldapuid))
+ qry_filter = params.get('filterstr', None)
+ if qry_filter:
+ params['filterstr'] = "(&%s(%s))" % (qry_filter, specificFilter)
+ params['attrlist'] = attributes
+ return params, None
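+
+    # Illustrative (assumed example values): with query_attribute "cn", a
+    # common filter "(objectClass=groupOfNames)" and ldapuid "developers",
+    # build_request() produces the conjoined filterstr
+    # "(&(objectClass=groupOfNames)(cn=developers))".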
+
+ def ldap_search(self, connection, ldapuid, required_attributes, unique_entry=True):
+ query, error = self.build_request(ldapuid, required_attributes)
+ if error:
+ return None, error
+ # set deref alias (TODO: need to set a default value to reset for each transaction)
+ derefAlias = query.pop('derefAlias', None)
+ if derefAlias:
+ ldap.set_option(ldap.OPT_DEREF, derefAlias)
+
+ try:
+ result = connection.search_ext_s(**query)
+ if not result or len(result) == 0:
+ return None, "Entry not found for base='{0}' and filter='{1}'".format(query['base'], query['filterstr'])
+ if unique_entry:
+ if len(result) > 1:
+ return None, "Multiple Entries found matching search criteria: %s (%s)" % (query, result)
+ result = result[0]
+ return result, None
+ except ldap.NO_SUCH_OBJECT:
+ return None, "Entry not found for base='{0}' and filter='{1}'".format(query['base'], query['filterstr'])
+ except Exception as err:
+ return None, "Request %s failed due to: %s" % (query, err)
+
+
+class OpenshiftLDAPQuery(object):
+ def __init__(self, qry):
+ # Query retrieves entries from an LDAP server
+ self.qry = qry
+
+ def build_request(self, attributes):
+ params = copy.deepcopy(self.qry)
+ params['attrlist'] = attributes
+ return params
+
+ def ldap_search(self, connection, required_attributes):
+ query = self.build_request(required_attributes)
+ # set deref alias (TODO: need to set a default value to reset for each transaction)
+ derefAlias = query.pop('derefAlias', None)
+ if derefAlias:
+ ldap.set_option(ldap.OPT_DEREF, derefAlias)
+
+ try:
+ result = connection.search_ext_s(**query)
+ if not result or len(result) == 0:
+ return None, "Entry not found for base='{0}' and filter='{1}'".format(query['base'], query['filterstr'])
+ return result, None
+ except ldap.NO_SUCH_OBJECT:
+ return None, "search for entry with base dn='{0}' refers to a non-existent entry".format(query['base'])
+
+
+class OpenshiftLDAPInterface(object):
+
+ def __init__(self, connection, groupQuery, groupNameAttributes, groupMembershipAttributes,
+ userQuery, userNameAttributes, config):
+
+ self.connection = connection
+ self.groupQuery = copy.deepcopy(groupQuery)
+ self.groupNameAttributes = groupNameAttributes
+ self.groupMembershipAttributes = groupMembershipAttributes
+ self.userQuery = copy.deepcopy(userQuery)
+ self.userNameAttributes = userNameAttributes
+ self.config = config
+
+ self.tolerate_not_found = boolean(config.get('tolerateMemberNotFoundErrors', False))
+ self.tolerate_out_of_scope = boolean(config.get('tolerateMemberOutOfScopeErrors', False))
+
+ self.required_group_attributes = [self.groupQuery.query_attribute]
+ for x in self.groupNameAttributes + self.groupMembershipAttributes:
+ if x not in self.required_group_attributes:
+ self.required_group_attributes.append(x)
+
+ self.required_user_attributes = [self.userQuery.query_attribute]
+ for x in self.userNameAttributes:
+ if x not in self.required_user_attributes:
+ self.required_user_attributes.append(x)
+
+ self.cached_groups = {}
+ self.cached_users = {}
+
+ def get_group_entry(self, uid):
+ """
+ get_group_entry returns an LDAP group entry for the given group UID by searching the internal cache
+ of the LDAPInterface first, then sending an LDAP query if the cache did not contain the entry.
+ """
+ if uid in self.cached_groups:
+ return self.cached_groups.get(uid), None
+
+ group, err = self.groupQuery.ldap_search(self.connection, uid, self.required_group_attributes)
+ if err:
+ return None, err
+ self.cached_groups[uid] = group
+ return group, None
+
+ def get_user_entry(self, uid):
+ """
+ get_user_entry returns an LDAP group entry for the given user UID by searching the internal cache
+ of the LDAPInterface first, then sending an LDAP query if the cache did not contain the entry.
+ """
+ if uid in self.cached_users:
+ return self.cached_users.get(uid), None
+
+ entry, err = self.userQuery.ldap_search(self.connection, uid, self.required_user_attributes)
+ if err:
+ return None, err
+ self.cached_users[uid] = entry
+ return entry, None
+
+ def exists(self, ldapuid):
+ group, error = self.get_group_entry(ldapuid)
+ return bool(group), error
+
+ def list_groups(self):
+ group_qry = copy.deepcopy(self.groupQuery.qry)
+ group_qry['attrlist'] = self.required_group_attributes
+
+ groups, err = openshift_ldap_query_for_entries(
+ connection=self.connection,
+ qry=group_qry,
+ unique_entry=False
+ )
+ if err:
+ return None, err
+
+ group_uids = []
+ for entry in groups:
+ uid = openshift_ldap_get_attribute_for_entry(entry, self.groupQuery.query_attribute)
+ if not uid:
+ return None, "Unable to find LDAP group uid for entry %s" % entry
+ self.cached_groups[uid] = entry
+ group_uids.append(uid)
+ return group_uids, None
+
+ def extract_members(self, uid):
+ """
+ returns the LDAP member entries for a group specified with a ldapGroupUID
+ """
+ # Get group entry from LDAP
+ group, err = self.get_group_entry(uid)
+ if err:
+ return None, err
+
+ # Extract member UIDs from group entry
+ member_uids = []
+ for attribute in self.groupMembershipAttributes:
+ member_uids += openshift_ldap_get_attribute_for_entry(group, attribute)
+
+ members = []
+ for user_uid in member_uids:
+ entry, err = self.get_user_entry(user_uid)
+ if err:
+ if self.tolerate_not_found and err.startswith("Entry not found"):
+ continue
+ elif err == LDAP_SEARCH_OUT_OF_SCOPE_ERROR:
+ continue
+ return None, err
+ members.append(entry)
+
+ return members, None
+
+
+class OpenshiftLDAPRFC2307(object):
+
+ def __init__(self, config, ldap_connection):
+
+ self.config = config
+ self.ldap_interface = self.create_ldap_interface(ldap_connection)
+
+ def create_ldap_interface(self, connection):
+ segment = self.config.get("rfc2307")
+ groups_base_qry = openshift_ldap_build_base_query(segment['groupsQuery'])
+ users_base_qry = openshift_ldap_build_base_query(segment['usersQuery'])
+
+ groups_query = OpenshiftLDAPQueryOnAttribute(groups_base_qry, segment['groupUIDAttribute'])
+ users_query = OpenshiftLDAPQueryOnAttribute(users_base_qry, segment['userUIDAttribute'])
+
+ params = dict(
+ connection=connection,
+ groupQuery=groups_query,
+ groupNameAttributes=segment['groupNameAttributes'],
+ groupMembershipAttributes=segment['groupMembershipAttributes'],
+ userQuery=users_query,
+ userNameAttributes=segment['userNameAttributes'],
+ config=segment
+ )
+ return OpenshiftLDAPInterface(**params)
+
+ def get_username_for_entry(self, entry):
+ username = openshift_ldap_get_attribute_for_entry(entry, self.ldap_interface.userNameAttributes)
+ if not username:
+            return None, "The user entry (%s) does not map to an OpenShift User name with the given mapping" % entry
+ return username, None
+
+ def get_group_name_for_uid(self, uid):
+
+ # Get name from User defined mapping
+ groupuid_name_mapping = self.config.get("groupUIDNameMapping")
+ if groupuid_name_mapping and uid in groupuid_name_mapping:
+ return groupuid_name_mapping.get(uid), None
+ elif self.ldap_interface.groupNameAttributes:
+ group, err = self.ldap_interface.get_group_entry(uid)
+ if err:
+ return None, err
+ group_name = openshift_ldap_get_attribute_for_entry(group, self.ldap_interface.groupNameAttributes)
+ if not group_name:
+ error = "The group entry (%s) does not map to an OpenShift Group name with the given name attribute (%s)" % (
+ group, self.ldap_interface.groupNameAttributes
+ )
+ return None, error
+ if isinstance(group_name, list):
+ group_name = group_name[0]
+ return group_name, None
+ else:
+ return None, "No OpenShift Group name defined for LDAP group UID: %s" % uid
+
+ def is_ldapgroup_exists(self, uid):
+ group, err = self.ldap_interface.get_group_entry(uid)
+ if err:
+ if err == LDAP_SEARCH_OUT_OF_SCOPE_ERROR or err.startswith("Entry not found") or "non-existent entry" in err:
+ return False, None
+ return False, err
+ if group:
+ return True, None
+ return False, None
+
+ def list_groups(self):
+ return self.ldap_interface.list_groups()
+
+ def extract_members(self, uid):
+ return self.ldap_interface.extract_members(uid)
+
+
+class OpenshiftLDAP_ADInterface(object):
+
+ def __init__(self, connection, user_query, group_member_attr, user_name_attr):
+ self.connection = connection
+ self.userQuery = user_query
+ self.groupMembershipAttributes = group_member_attr
+ self.userNameAttributes = user_name_attr
+
+ self.required_user_attributes = self.userNameAttributes or []
+ for attr in self.groupMembershipAttributes:
+ if attr not in self.required_user_attributes:
+ self.required_user_attributes.append(attr)
+
+ self.cache = {}
+ self.cache_populated = False
+
+ def is_entry_present(self, cache_item, entry):
+ for item in cache_item:
+ if item[0] == entry[0]:
+ return True
+ return False
+
+ def populate_cache(self):
+ if not self.cache_populated:
+ self.cache_populated = True
+ entries, err = self.userQuery.ldap_search(self.connection, self.required_user_attributes)
+ if err:
+ return err
+
+ for entry in entries:
+ for group_attr in self.groupMembershipAttributes:
+ uids = openshift_ldap_get_attribute_for_entry(entry, group_attr)
+ if not isinstance(uids, list):
+ uids = [uids]
+ for uid in uids:
+ if uid not in self.cache:
+ self.cache[uid] = []
+ if not self.is_entry_present(self.cache[uid], entry):
+ self.cache[uid].append(entry)
+ return None
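+
+    # Illustrative (assumed attribute values): if a user entry carries
+    # memberOf values ["cn=devs,...", "cn=ops,..."], populate_cache() indexes
+    # that entry under both group UIDs, so list_groups() and extract_members()
+    # can often be answered from the cache without extra LDAP round trips.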
+
+ def list_groups(self):
+ err = self.populate_cache()
+ if err:
+ return None, err
+ result = []
+ if self.cache:
+ result = self.cache.keys()
+ return result, None
+
+ def extract_members(self, uid):
+ # ExtractMembers returns the LDAP member entries for a group specified with a ldapGroupUID
+ # if we already have it cached, return the cached value
+ if uid in self.cache:
+ return self.cache[uid], None
+
+ # This happens in cases where we did not list out every group.
+ # In that case, we're going to be asked about specific groups.
+ users_in_group = []
+ for attr in self.groupMembershipAttributes:
+ query_on_attribute = OpenshiftLDAPQueryOnAttribute(self.userQuery.qry, attr)
+ entries, error = query_on_attribute.ldap_search(self.connection, uid, self.required_user_attributes, unique_entry=False)
+ if error and "not found" not in error:
+ return None, error
+ if not entries:
+ continue
+
+ for entry in entries:
+ if not self.is_entry_present(users_in_group, entry):
+ users_in_group.append(entry)
+
+ self.cache[uid] = users_in_group
+ return users_in_group, None
+
+
+class OpenshiftLDAPActiveDirectory(object):
+
+ def __init__(self, config, ldap_connection):
+
+ self.config = config
+ self.ldap_interface = self.create_ldap_interface(ldap_connection)
+
+ def create_ldap_interface(self, connection):
+ segment = self.config.get("activeDirectory")
+ base_query = openshift_ldap_build_base_query(segment['usersQuery'])
+ user_query = OpenshiftLDAPQuery(base_query)
+
+ return OpenshiftLDAP_ADInterface(
+ connection=connection,
+ user_query=user_query,
+ group_member_attr=segment["groupMembershipAttributes"],
+ user_name_attr=segment["userNameAttributes"],
+ )
+
+ def get_username_for_entry(self, entry):
+ username = openshift_ldap_get_attribute_for_entry(entry, self.ldap_interface.userNameAttributes)
+ if not username:
+            return None, "The user entry (%s) does not map to an OpenShift User name with the given mapping" % entry
+ return username, None
+
+ def get_group_name_for_uid(self, uid):
+ return uid, None
+
+ def is_ldapgroup_exists(self, uid):
+ members, error = self.extract_members(uid)
+ if error:
+ return False, error
+        exists = bool(members)
+ return exists, None
+
+ def list_groups(self):
+ return self.ldap_interface.list_groups()
+
+ def extract_members(self, uid):
+ return self.ldap_interface.extract_members(uid)
+
+
+class OpenshiftLDAP_AugmentedADInterface(OpenshiftLDAP_ADInterface):
+
+ def __init__(self, connection, user_query, group_member_attr, user_name_attr, group_qry, group_name_attr):
+ super(OpenshiftLDAP_AugmentedADInterface, self).__init__(
+ connection, user_query, group_member_attr, user_name_attr
+ )
+ self.groupQuery = copy.deepcopy(group_qry)
+ self.groupNameAttributes = group_name_attr
+
+ self.required_group_attributes = [self.groupQuery.query_attribute]
+ for x in self.groupNameAttributes:
+ if x not in self.required_group_attributes:
+ self.required_group_attributes.append(x)
+
+ self.cached_groups = {}
+
+ def get_group_entry(self, uid):
+ """
+ get_group_entry returns an LDAP group entry for the given group UID by searching the internal cache
+ of the LDAPInterface first, then sending an LDAP query if the cache did not contain the entry.
+ """
+ if uid in self.cached_groups:
+ return self.cached_groups.get(uid), None
+
+ group, err = self.groupQuery.ldap_search(self.connection, uid, self.required_group_attributes)
+ if err:
+ return None, err
+ self.cached_groups[uid] = group
+ return group, None
+
+ def exists(self, ldapuid):
+ # Get group members
+ members, error = self.extract_members(ldapuid)
+ if error:
+ return False, error
+ group_exists = bool(members)
+
+ # Check group Existence
+ entry, error = self.get_group_entry(ldapuid)
+ if error:
+ if "not found" in error:
+ return False, None
+ else:
+ return False, error
+ else:
+ return group_exists and bool(entry), None
+
+
+class OpenshiftLDAPAugmentedActiveDirectory(OpenshiftLDAPRFC2307):
+
+ def __init__(self, config, ldap_connection):
+
+ self.config = config
+ self.ldap_interface = self.create_ldap_interface(ldap_connection)
+
+ def create_ldap_interface(self, connection):
+ segment = self.config.get("augmentedActiveDirectory")
+ user_base_query = openshift_ldap_build_base_query(segment['usersQuery'])
+ groups_base_qry = openshift_ldap_build_base_query(segment['groupsQuery'])
+
+ user_query = OpenshiftLDAPQuery(user_base_query)
+ groups_query = OpenshiftLDAPQueryOnAttribute(groups_base_qry, segment['groupUIDAttribute'])
+
+ return OpenshiftLDAP_AugmentedADInterface(
+ connection=connection,
+ user_query=user_query,
+ group_member_attr=segment["groupMembershipAttributes"],
+ user_name_attr=segment["userNameAttributes"],
+ group_qry=groups_query,
+ group_name_attr=segment["groupNameAttributes"]
+ )
+
+ def is_ldapgroup_exists(self, uid):
+ return self.ldap_interface.exists(uid)
diff --git a/ansible_collections/community/okd/plugins/module_utils/openshift_process.py b/ansible_collections/community/okd/plugins/module_utils/openshift_process.py
new file mode 100644
index 000000000..6fa69d13c
--- /dev/null
+++ b/ansible_collections/community/okd/plugins/module_utils/openshift_process.py
@@ -0,0 +1,199 @@
+#!/usr/bin/env python
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import traceback
+
+from ansible.module_utils._text import to_native
+
+
+from ansible_collections.community.okd.plugins.module_utils.openshift_common import AnsibleOpenshiftModule
+
+try:
+ from kubernetes.dynamic.exceptions import DynamicApiError
+except ImportError:
+ pass
+
+
+class OpenShiftProcess(AnsibleOpenshiftModule):
+ def __init__(self, **kwargs):
+ super(OpenShiftProcess, self).__init__(**kwargs)
+
+ def execute_module(self):
+ v1_templates = self.find_resource(
+ "templates", "template.openshift.io/v1", fail=True
+ )
+ v1_processed_templates = self.find_resource(
+ "processedtemplates", "template.openshift.io/v1", fail=True
+ )
+
+ name = self.params.get("name")
+ namespace = self.params.get("namespace")
+ namespace_target = self.params.get("namespace_target")
+ definition = self.params.get("resource_definition")
+ src = self.params.get("src")
+
+ state = self.params.get("state")
+
+ parameters = self.params.get("parameters") or {}
+ parameter_file = self.params.get("parameter_file")
+
+ if (name and definition) or (name and src) or (src and definition):
+ self.fail_json("Only one of src, name, or definition may be provided")
+
+ if name and not namespace:
+ self.fail_json("namespace is required when name is set")
+
+ template = None
+
+ if src or definition:
+ self.set_resource_definitions()
+ if len(self.resource_definitions) < 1:
+ self.fail_json(
+ "Unable to load a Template resource from src or resource_definition"
+ )
+ elif len(self.resource_definitions) > 1:
+ self.fail_json(
+ "Multiple Template resources found in src or resource_definition, only one Template may be processed at a time"
+ )
+ template = self.resource_definitions[0]
+ template_namespace = template.get("metadata", {}).get("namespace")
+ namespace = template_namespace or namespace or namespace_target or "default"
+ elif name and namespace:
+ try:
+ template = v1_templates.get(name=name, namespace=namespace).to_dict()
+ except DynamicApiError as exc:
+ self.fail_json(
+ msg="Failed to retrieve Template with name '{0}' in namespace '{1}': {2}".format(
+ name, namespace, exc.body
+ ),
+ error=exc.status,
+ status=exc.status,
+ reason=exc.reason,
+ )
+ except Exception as exc:
+ self.fail_json(
+ msg="Failed to retrieve Template with name '{0}' in namespace '{1}': {2}".format(
+ name, namespace, to_native(exc)
+ ),
+ error="",
+ status="",
+ reason="",
+ )
+ else:
+ self.fail_json(
+ "One of resource_definition, src, or name and namespace must be provided"
+ )
+
+ if parameter_file:
+ parameters = self.parse_dotenv_and_merge(parameters, parameter_file)
+
+ for k, v in parameters.items():
+ template = self.update_template_param(template, k, v)
+
+ result = {"changed": False}
+
+ try:
+ response = v1_processed_templates.create(
+ body=template, namespace=namespace
+ ).to_dict()
+ except DynamicApiError as exc:
+ self.fail_json(
+ msg="Server failed to render the Template: {0}".format(exc.body),
+ error=exc.status,
+ status=exc.status,
+ reason=exc.reason,
+ )
+ except Exception as exc:
+ self.fail_json(
+ msg="Server failed to render the Template: {0}".format(to_native(exc)),
+ error="",
+ status="",
+ reason="",
+ )
+ result["message"] = ""
+ if "message" in response:
+ result["message"] = response["message"]
+ result["resources"] = response["objects"]
+
+ if state != "rendered":
+ self.create_resources(response["objects"])
+
+ self.exit_json(**result)
+
+ def create_resources(self, definitions):
+
+ params = {"namespace": self.params.get("namespace_target")}
+
+ self.params["apply"] = False
+ self.params["validate"] = None
+
+ changed = False
+ results = []
+
+ flattened_definitions = []
+ for definition in definitions:
+ if definition is None:
+ continue
+ kind = definition.get("kind")
+ if kind and kind.endswith("List"):
+ flattened_definitions.extend(
+ self.flatten_list_kind(definition, params)
+ )
+ else:
+ flattened_definitions.append(self.merge_params(definition, params))
+
+ for definition in flattened_definitions:
+ result = self.perform_action(definition, self.params)
+ changed = changed or result["changed"]
+ results.append(result)
+
+ if len(results) == 1:
+ self.exit_json(**results[0])
+
+ self.exit_json(**{"changed": changed, "result": {"results": results}})
+
+ def update_template_param(self, template, k, v):
+ for i, param in enumerate(template["parameters"]):
+ if param["name"] == k:
+ template["parameters"][i]["value"] = v
+ return template
+ return template
+
+ def parse_dotenv_and_merge(self, parameters, parameter_file):
+ import re
+
+ DOTENV_PARSER = re.compile(
+            r"(?x)^(\s*(\#.*|\s*|(export\s+)?(?P<key>[A-Za-z_][A-Za-z0-9_.]*)=(?P<value>.+?)?)\s*)[\r\n]*$"
+ )
+ path = os.path.normpath(parameter_file)
+ if not os.path.exists(path):
+ self.fail(msg="Error accessing {0}. Does the file exist?".format(path))
+ try:
+ with open(path, "r") as f:
+ multiline = ""
+ for line in f.readlines():
+ line = line.strip()
+ if line.endswith("\\"):
+ multiline += " ".join(line.rsplit("\\", 1))
+ continue
+ if multiline:
+ line = multiline + line
+ multiline = ""
+ match = DOTENV_PARSER.search(line)
+ if not match:
+ continue
+ match = match.groupdict()
+ if match.get("key"):
+ if match["key"] in parameters:
+ self.fail_json(
+ msg="Duplicate value for '{0}' detected in parameter file".format(
+ match["key"]
+ )
+ )
+ parameters[match["key"]] = match["value"]
+ except IOError as exc:
+ self.fail(msg="Error loading parameter file: {0}".format(exc))
+ return parameters
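+
+
+# Illustrative parameter file accepted by parse_dotenv_and_merge() above
+# (assumed example values): one KEY=VALUE pair per line, '#' starts a
+# comment, and an optional 'export ' prefix is tolerated, e.g.
+#
+#   # deployment parameters
+#   NAME=my-app
+#   export REPLICAS=2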
diff --git a/ansible_collections/community/okd/plugins/module_utils/openshift_registry.py b/ansible_collections/community/okd/plugins/module_utils/openshift_registry.py
new file mode 100644
index 000000000..32a1830df
--- /dev/null
+++ b/ansible_collections/community/okd/plugins/module_utils/openshift_registry.py
@@ -0,0 +1,144 @@
+#!/usr/bin/env python
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import traceback
+from urllib.parse import urlparse
+
+from ansible_collections.community.okd.plugins.module_utils.openshift_common import AnsibleOpenshiftModule
+
+from ansible_collections.community.okd.plugins.module_utils.openshift_docker_image import (
+ parse_docker_image_ref,
+)
+
+try:
+ from requests import request
+ from requests.auth import HTTPBasicAuth
+ HAS_REQUESTS_MODULE = True
+ requests_import_exception = None
+except ImportError as e:
+ HAS_REQUESTS_MODULE = False
+ requests_import_exception = e
+ REQUESTS_MODULE_ERROR = traceback.format_exc()
+
+
+class OpenShiftRegistry(AnsibleOpenshiftModule):
+ def __init__(self, **kwargs):
+ super(OpenShiftRegistry, self).__init__(**kwargs)
+ self.check = self.params.get("check")
+
+ def list_image_streams(self, namespace=None):
+ kind = "ImageStream"
+ api_version = "image.openshift.io/v1"
+
+ params = dict(
+ kind=kind,
+ api_version=api_version,
+ namespace=namespace
+ )
+ result = self.kubernetes_facts(**params)
+ imagestream = []
+ if len(result["resources"]) > 0:
+ imagestream = result["resources"]
+ return imagestream
+
+ def find_registry_info(self):
+
+ def _determine_registry(image_stream):
+ public, internal = None, None
+ docker_repo = image_stream["status"].get("publicDockerImageRepository")
+ if docker_repo:
+ ref, err = parse_docker_image_ref(docker_repo, self.module)
+ public = ref["hostname"]
+
+ docker_repo = image_stream["status"].get("dockerImageRepository")
+ if docker_repo:
+ ref, err = parse_docker_image_ref(docker_repo, self.module)
+ internal = ref["hostname"]
+ return internal, public
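+
+        # Illustrative (assumed example value): a status field such as
+        #   publicDockerImageRepository: "registry.apps.example.com/ns/app"
+        # yields the hostname "registry.apps.example.com", which is returned
+        # as the public registry host.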
+
+        # Try to determine the registry hosts from Image Streams in the 'openshift' namespace
+ for stream in self.list_image_streams(namespace="openshift"):
+ internal, public = _determine_registry(stream)
+ if not public and not internal:
+ self.fail_json(msg="The integrated registry has not been configured")
+ return internal, public
+
+        # Unable to determine the registry from the 'openshift' namespace, trying across all namespaces
+ for stream in self.list_image_streams():
+ internal, public = _determine_registry(stream)
+ if not public and not internal:
+ self.fail_json(msg="The integrated registry has not been configured")
+ return internal, public
+
+ self.fail_json(msg="No Image Streams could be located to retrieve registry info.")
+
+ def execute_module(self):
+ result = {}
+ result["internal_hostname"], result["public_hostname"] = self.find_registry_info()
+
+ if self.check:
+ public_registry = result["public_hostname"]
+ if not public_registry:
+ result["check"] = dict(
+ reached=False,
+ msg="Registry does not have a public hostname."
+ )
+ else:
+ headers = {
+ 'Content-Type': 'application/json'
+ }
+ params = {
+ 'method': 'GET',
+ 'verify': False
+ }
+ if self.client.configuration.api_key:
+ headers.update(self.client.configuration.api_key)
+ elif self.client.configuration.username and self.client.configuration.password:
+ if not HAS_REQUESTS_MODULE:
+ result["check"] = dict(
+ reached=False,
+ msg="The requests python package is missing, try `pip install requests`",
+ error=requests_import_exception
+ )
+ self.exit_json(**result)
+ params.update(
+ dict(auth=HTTPBasicAuth(self.client.configuration.username, self.client.configuration.password))
+ )
+
+ # verify ssl
+                host = urlparse(public_registry)
+                registry_url = public_registry
+                if len(host.scheme) == 0:
+                    registry_url = "https://" + public_registry
+
+ if registry_url.startswith("https://") and self.client.configuration.ssl_ca_cert:
+ params.update(
+ dict(verify=self.client.configuration.ssl_ca_cert)
+ )
+ params.update(
+ dict(headers=headers)
+ )
+ last_bad_status, last_bad_reason = None, None
+ for path in ("/", "/healthz"):
+ params.update(
+ dict(url=registry_url + path)
+ )
+ response = request(**params)
+ if response.status_code == 200:
+ result["check"] = dict(
+ reached=True,
+ msg="The local client can contact the integrated registry."
+ )
+ self.exit_json(**result)
+ last_bad_reason = response.reason
+ last_bad_status = response.status_code
+
+ result["check"] = dict(
+ reached=False,
+ msg="Unable to contact the integrated registry using local client. Status=%d, Reason=%s" % (
+ last_bad_status, last_bad_reason
+ )
+ )
+
+ self.exit_json(**result)
diff --git a/ansible_collections/community/okd/plugins/modules/k8s.py b/ansible_collections/community/okd/plugins/modules/k8s.py
new file mode 100644
index 000000000..c3b8d1b66
--- /dev/null
+++ b/ansible_collections/community/okd/plugins/modules/k8s.py
@@ -0,0 +1,308 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Chris Houseknecht <@chouseknecht>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+# STARTREMOVE (downstream)
+DOCUMENTATION = r'''
+
+module: k8s
+
+short_description: Manage OpenShift objects
+
+author:
+ - "Chris Houseknecht (@chouseknecht)"
+ - "Fabian von Feilitzsch (@fabianvf)"
+
+description:
+ - Use the Kubernetes Python client to perform CRUD operations on K8s objects.
+ - Pass the object definition from a source file or inline. See examples for reading
+ files and using Jinja templates or vault-encrypted files.
+ - Access to the full range of K8s APIs.
+ - Use the M(kubernetes.core.k8s_info) module to obtain a list of items about an object of type C(kind).
+ - Authenticate using either a config file, certificates, password or token.
+ - Supports check mode.
+ - Optimized for OKD/OpenShift Kubernetes flavors.
+
+extends_documentation_fragment:
+ - kubernetes.core.k8s_name_options
+ - kubernetes.core.k8s_resource_options
+ - kubernetes.core.k8s_auth_options
+ - kubernetes.core.k8s_wait_options
+ - kubernetes.core.k8s_delete_options
+
+options:
+ state:
+ description:
+ - Determines if an object should be created, patched, or deleted. When set to C(present), an object will be
+ created, if it does not already exist. If set to C(absent), an existing object will be deleted. If set to
+ C(present), an existing object will be patched, if its attributes differ from those specified using
+ I(resource_definition) or I(src).
+      - When set to C(patched), an existing object will be patched with the given definition. If the object does not exist, it is silently skipped (no error is raised).
+ type: str
+ default: present
+ choices: [ absent, present, patched ]
+ force:
+ description:
+ - If set to C(yes), and I(state) is C(present), an existing object will be replaced.
+ type: bool
+ default: no
+ merge_type:
+ description:
+ - Whether to override the default patch merge approach with a specific type. By default, the strategic
+ merge will typically be used.
+ - For example, Custom Resource Definitions typically aren't updatable by the usual strategic merge. You may
+        want to use C(merge) if you see "strategic merge patch format is not supported".
+ - See U(https://kubernetes.io/docs/tasks/run-application/update-api-object-kubectl-patch/#use-a-json-merge-patch-to-update-a-deployment)
+ - If more than one merge_type is given, the merge_types will be tried in order
+ - Defaults to C(['strategic-merge', 'merge']), which is ideal for using the same parameters
+ on resource kinds that combine Custom Resources and built-in resources.
+ - mutually exclusive with C(apply)
+ - I(merge_type=json) is deprecated and will be removed in version 3.0.0. Please use M(kubernetes.core.k8s_json_patch) instead.
+ choices:
+ - json
+ - merge
+ - strategic-merge
+ type: list
+ elements: str
+ validate:
+ description:
+ - how (if at all) to validate the resource definition against the kubernetes schema.
+ Requires the kubernetes-validate python module
+ suboptions:
+ fail_on_error:
+ description: whether to fail on validation errors.
+ type: bool
+ version:
+ description: version of Kubernetes to validate against. defaults to Kubernetes server version
+ type: str
+ strict:
+ description: whether to fail when passing unexpected properties
+ default: True
+ type: bool
+ type: dict
+ append_hash:
+ description:
+ - Whether to append a hash to a resource name for immutability purposes
+ - Applies only to ConfigMap and Secret resources
+ - The parameter will be silently ignored for other resource kinds
+ - The full definition of an object is needed to generate the hash - this means that deleting an object created with append_hash
+ will only work if the same object is passed with state=absent (alternatively, just use state=absent with the name including
+ the generated hash and append_hash=no)
+ type: bool
+ default: false
+ apply:
+ description:
+ - C(apply) compares the desired resource definition with the previously supplied resource definition,
+ ignoring properties that are automatically generated
+ - C(apply) works better with Services than 'force=yes'
+ - mutually exclusive with C(merge_type)
+ type: bool
+ default: false
+ template:
+ description:
+ - Provide a valid YAML template definition file for an object when creating or updating.
+ - Value can be provided as string or dictionary.
+ - Mutually exclusive with C(src) and C(resource_definition).
+      - Template files need to be present on the Ansible Controller's file system.
+ - Additional parameters can be specified using dictionary.
+ - 'Valid additional parameters - '
+ - 'C(newline_sequence) (str): Specify the newline sequence to use for templating files.
+ valid choices are "\n", "\r", "\r\n". Default value "\n".'
+ - 'C(block_start_string) (str): The string marking the beginning of a block.
+ Default value "{%".'
+ - 'C(block_end_string) (str): The string marking the end of a block.
+ Default value "%}".'
+ - 'C(variable_start_string) (str): The string marking the beginning of a print statement.
+ Default value "{{".'
+ - 'C(variable_end_string) (str): The string marking the end of a print statement.
+ Default value "}}".'
+ - 'C(trim_blocks) (bool): Determine when newlines should be removed from blocks. When set to C(yes) the first newline
+ after a block is removed (block, not variable tag!). Default value is true.'
+ - 'C(lstrip_blocks) (bool): Determine when leading spaces and tabs should be stripped.
+ When set to C(yes) leading spaces and tabs are stripped from the start of a line to a block.
+ This functionality requires Jinja 2.7 or newer. Default value is false.'
+ type: raw
+ version_added: '2.0.0'
+ continue_on_error:
+ description:
+ - Whether to continue on creation/deletion errors when multiple resources are defined.
+ - This has no effect on the validation step which is controlled by the C(validate.fail_on_error) parameter.
+ type: bool
+ default: False
+ version_added: 2.0.0
+
+requirements:
+ - "python >= 3.6"
+ - "kubernetes >= 12.0.0"
+ - "PyYAML >= 3.11"
+'''
+
+EXAMPLES = r'''
+- name: Create a k8s namespace
+ community.okd.k8s:
+ name: testing
+ api_version: v1
+ kind: Namespace
+ state: present
+
+- name: Create a Service object from an inline definition
+ community.okd.k8s:
+ state: present
+ definition:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: web
+ namespace: testing
+ labels:
+ app: galaxy
+ service: web
+ spec:
+ selector:
+ app: galaxy
+ service: web
+ ports:
+ - protocol: TCP
+ targetPort: 8000
+ name: port-8000-tcp
+ port: 8000
+
+- name: Remove an existing Service object
+ community.okd.k8s:
+ state: absent
+ api_version: v1
+ kind: Service
+ namespace: testing
+ name: web
+
+# Passing the object definition from a file
+
+- name: Create a Deployment by reading the definition from a local file
+ community.okd.k8s:
+ state: present
+ src: /testing/deployment.yml
+
+- name: >-
+ Read definition file from the Ansible controller file system.
+ If the definition file has been encrypted with Ansible Vault it will automatically be decrypted.
+ community.okd.k8s:
+ state: present
+ definition: "{{ lookup('file', '/testing/deployment.yml') | from_yaml }}"
+
+- name: Read definition file from the Ansible controller file system after Jinja templating
+ community.okd.k8s:
+ state: present
+ definition: "{{ lookup('template', '/testing/deployment.yml') | from_yaml }}"
+
+- name: fail on validation errors
+ community.okd.k8s:
+ state: present
+ definition: "{{ lookup('template', '/testing/deployment.yml') | from_yaml }}"
+ validate:
+ fail_on_error: yes
+
+- name: warn on validation errors, check for unexpected properties
+ community.okd.k8s:
+ state: present
+ definition: "{{ lookup('template', '/testing/deployment.yml') | from_yaml }}"
+ validate:
+ fail_on_error: no
+ strict: yes
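+
+# Illustrative only (example resource values): use a JSON merge patch when
+# the strategic merge is not supported for the resource kind
+- name: Patch a ConfigMap using a merge patch
+  community.okd.k8s:
+    state: present
+    merge_type: merge
+    definition:
+      apiVersion: v1
+      kind: ConfigMap
+      metadata:
+        name: example-config
+        namespace: testing
+      data:
+        key: value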
+'''
+
+RETURN = r'''
+result:
+ description:
+ - The created, patched, or otherwise present object. Will be empty in the case of a deletion.
+ returned: success
+ type: complex
+ contains:
+ api_version:
+ description: The versioned schema of this representation of an object.
+ returned: success
+ type: str
+ kind:
+ description: Represents the REST resource this object represents.
+ returned: success
+ type: str
+ metadata:
+ description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
+ returned: success
+ type: complex
+ spec:
+ description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
+ returned: success
+ type: complex
+ status:
+ description: Current status details for the object.
+ returned: success
+ type: complex
+ items:
+ description: Returned only when multiple yaml documents are passed to src or resource_definition
+ returned: when resource_definition or src contains list of objects
+ type: list
+ duration:
+ description: elapsed time of task in seconds
+ returned: when C(wait) is true
+ type: int
+ sample: 48
+ error:
+ description: error while trying to create/delete the object.
+ returned: error
+ type: complex
+'''
+# ENDREMOVE (downstream)
+
+from ansible_collections.kubernetes.core.plugins.module_utils.args_common import (
+ NAME_ARG_SPEC, RESOURCE_ARG_SPEC, AUTH_ARG_SPEC, WAIT_ARG_SPEC, DELETE_OPTS_ARG_SPEC
+)
+
+
+def validate_spec():
+ return dict(
+ fail_on_error=dict(type='bool'),
+ version=dict(),
+ strict=dict(type='bool', default=True)
+ )
+
+
+def argspec():
+ argument_spec = {}
+ argument_spec.update(NAME_ARG_SPEC)
+ argument_spec.update(RESOURCE_ARG_SPEC)
+ argument_spec.update(AUTH_ARG_SPEC)
+ argument_spec.update(WAIT_ARG_SPEC)
+ argument_spec['merge_type'] = dict(type='list', elements='str', choices=['json', 'merge', 'strategic-merge'])
+ argument_spec['validate'] = dict(type='dict', default=None, options=validate_spec())
+ argument_spec['append_hash'] = dict(type='bool', default=False)
+ argument_spec['apply'] = dict(type='bool', default=False)
+ argument_spec['template'] = dict(type='raw', default=None)
+ argument_spec['delete_options'] = dict(type='dict', default=None, options=DELETE_OPTS_ARG_SPEC)
+ argument_spec['continue_on_error'] = dict(type='bool', default=False)
+ argument_spec['state'] = dict(default='present', choices=['present', 'absent', 'patched'])
+ argument_spec['force'] = dict(type='bool', default=False)
+ return argument_spec
+
+
+def main():
+ mutually_exclusive = [
+ ('resource_definition', 'src'),
+ ('merge_type', 'apply'),
+ ('template', 'resource_definition'),
+ ('template', 'src'),
+ ]
+
+ from ansible_collections.community.okd.plugins.module_utils.k8s import OKDRawModule
+ module = OKDRawModule(argument_spec=argspec(), supports_check_mode=True, mutually_exclusive=mutually_exclusive)
+ module.run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/okd/plugins/modules/openshift_adm_groups_sync.py b/ansible_collections/community/okd/plugins/modules/openshift_adm_groups_sync.py
new file mode 100644
index 000000000..66b0fbb15
--- /dev/null
+++ b/ansible_collections/community/okd/plugins/modules/openshift_adm_groups_sync.py
@@ -0,0 +1,224 @@
+#!/usr/bin/python
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+# STARTREMOVE (downstream)
+DOCUMENTATION = r"""
+
+module: openshift_adm_groups_sync
+
+short_description: Sync OpenShift Groups with records from an external provider.
+
+version_added: "2.1.0"
+
+author:
+ - Aubin Bikouo (@abikouo)
+
+description:
+ - In order to sync/prune OpenShift Group records with those from an external provider, determine which Groups you wish to sync
+ and where their records live.
+  - Analogous to C(oc adm groups prune) and C(oc adm groups sync).
+ - LDAP sync configuration file syntax can be found here
+ U(https://docs.openshift.com/container-platform/4.9/authentication/ldap-syncing.html).
+ - The bindPassword attribute of the LDAP sync configuration is expected to be a string,
+ please use ansible-vault encryption to secure this information.
+
+extends_documentation_fragment:
+ - kubernetes.core.k8s_auth_options
+
+options:
+ state:
+ description:
+      - Determines if the groups should be synced when set to C(present) or pruned when set to C(absent).
+ type: str
+ default: present
+ choices: [ absent, present ]
+ type:
+ description:
+      - Which groups the allow and deny list entries refer to.
+ type: str
+ default: ldap
+ choices: [ ldap, openshift ]
+ sync_config:
+ description:
+ - Provide a valid YAML definition of an LDAP sync configuration.
+ type: dict
+ aliases:
+ - config
+ - src
+ required: True
+ deny_groups:
+ description:
+      - Denied groups; each entry can be an OpenShift group name or an LDAP group DN value.
+      - When parameter C(type) is set to I(ldap) this should contain only LDAP group definitions
+ like I(cn=developers,ou=groups,ou=rfc2307,dc=ansible,dc=redhat).
+ - The elements specified in this list will override the ones specified in C(allow_groups).
+ type: list
+ elements: str
+ default: []
+ allow_groups:
+ description:
+      - Allowed groups; each entry can be an OpenShift group name or an LDAP group DN value.
+      - When parameter C(type) is set to I(ldap) this should contain only LDAP group definitions
+ like I(cn=developers,ou=groups,ou=rfc2307,dc=ansible,dc=redhat).
+ type: list
+ elements: str
+ default: []
+
+requirements:
+ - python >= 3.6
+ - kubernetes >= 12.0.0
+ - python-ldap
+"""
+
+EXAMPLES = r"""
+# Prune all orphaned groups
+- name: Prune all orphan groups
+ openshift_adm_groups_sync:
+ state: absent
+ src: "{{ lookup('file', '/path/to/ldap-sync-config.yaml') | from_yaml }}"
+
+# Prune all orphaned groups from a list of specific groups specified in allow_groups
+- name: Prune all orphan groups from a list of specific groups specified in allow_groups
+ openshift_adm_groups_sync:
+ state: absent
+ src: "{{ lookup('file', '/path/to/ldap-sync-config.yaml') | from_yaml }}"
+ allow_groups:
+ - cn=developers,ou=groups,ou=rfc2307,dc=ansible,dc=redhat
+ - cn=developers,ou=groups,ou=rfc2307,dc=ansible,dc=redhat
+
+# Sync all groups from an LDAP server
+- name: Sync all groups from an LDAP server
+ openshift_adm_groups_sync:
+ src:
+ kind: LDAPSyncConfig
+ apiVersion: v1
+ url: ldap://localhost:1390
+ insecure: true
+ bindDN: cn=admin,dc=example,dc=org
+ bindPassword: adminpassword
+ rfc2307:
+ groupsQuery:
+ baseDN: "cn=admins,ou=groups,dc=example,dc=org"
+ scope: sub
+ derefAliases: never
+ filter: (objectClass=*)
+ pageSize: 0
+ groupUIDAttribute: dn
+ groupNameAttributes: [ cn ]
+ groupMembershipAttributes: [ member ]
+ usersQuery:
+ baseDN: "ou=users,dc=example,dc=org"
+ scope: sub
+ derefAliases: never
+ pageSize: 0
+ userUIDAttribute: dn
+ userNameAttributes: [ mail ]
+ tolerateMemberNotFoundErrors: true
+ tolerateMemberOutOfScopeErrors: true
+
+# Sync all groups except the ones from the deny_groups from an LDAP server
+- name: Sync all groups from an LDAP server using deny_groups
+ openshift_adm_groups_sync:
+ src: "{{ lookup('file', '/path/to/ldap-sync-config.yaml') | from_yaml }}"
+ deny_groups:
+ - cn=developers,ou=groups,ou=rfc2307,dc=ansible,dc=redhat
+ - cn=developers,ou=groups,ou=rfc2307,dc=ansible,dc=redhat
+
+# Sync all OpenShift Groups that have been synced previously with an LDAP server
+- name: Sync all OpenShift Groups that have been synced previously with an LDAP server
+ openshift_adm_groups_sync:
+ src: "{{ lookup('file', '/path/to/ldap-sync-config.yaml') | from_yaml }}"
+ type: openshift
+"""
+
+
+RETURN = r"""
+builds:
+ description:
+ - The groups that were created, updated or deleted
+ returned: success
+ type: list
+ elements: dict
+ sample: [
+ {
+ "apiVersion": "user.openshift.io/v1",
+ "kind": "Group",
+ "metadata": {
+ "annotations": {
+ "openshift.io/ldap.sync-time": "2021-12-17T12:20:28.125282",
+ "openshift.io/ldap.uid": "cn=developers,ou=groups,ou=rfc2307,dc=ansible,dc=redhat",
+ "openshift.io/ldap.url": "localhost:1390"
+ },
+ "creationTimestamp": "2021-12-17T11:09:49Z",
+ "labels": {
+ "openshift.io/ldap.host": "localhost"
+ },
+ "managedFields": [{
+ "apiVersion": "user.openshift.io/v1",
+ "fieldsType": "FieldsV1",
+ "fieldsV1": {
+ "f:metadata": {
+ "f:annotations": {
+ ".": {},
+ "f:openshift.io/ldap.sync-time": {},
+ "f:openshift.io/ldap.uid": {},
+ "f:openshift.io/ldap.url": {}
+ },
+ "f:labels": {
+ ".": {},
+ "f:openshift.io/ldap.host": {}
+ }
+ },
+ "f:users": {}
+ },
+ "manager": "OpenAPI-Generator",
+ "operation": "Update",
+ "time": "2021-12-17T11:09:49Z"
+ }],
+ "name": "developers",
+ "resourceVersion": "2014696",
+ "uid": "8dc211cb-1544-41e1-96b1-efffeed2d7d7"
+ },
+ "users": ["jordanbulls@ansible.org"]
+ }
+ ]
+"""
+# ENDREMOVE (downstream)
+
+import copy
+import traceback
+
+from ansible_collections.kubernetes.core.plugins.module_utils.args_common import AUTH_ARG_SPEC
+
+
+def argument_spec():
+ args = copy.deepcopy(AUTH_ARG_SPEC)
+ args.update(
+ dict(
+ state=dict(type='str', choices=['absent', 'present'], default='present'),
+ type=dict(type='str', choices=['ldap', 'openshift'], default='ldap'),
+ sync_config=dict(type='dict', aliases=['config', 'src'], required=True),
+ deny_groups=dict(type='list', elements='str', default=[]),
+ allow_groups=dict(type='list', elements='str', default=[]),
+ )
+ )
+ return args
+
+
+def main():
+ from ansible_collections.community.okd.plugins.module_utils.openshift_groups import (
+ OpenshiftGroupsSync
+ )
+
+ module = OpenshiftGroupsSync(argument_spec=argument_spec(), supports_check_mode=True)
+ module.run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/okd/plugins/modules/openshift_adm_migrate_template_instances.py b/ansible_collections/community/okd/plugins/modules/openshift_adm_migrate_template_instances.py
new file mode 100644
index 000000000..05d5563cd
--- /dev/null
+++ b/ansible_collections/community/okd/plugins/modules/openshift_adm_migrate_template_instances.py
@@ -0,0 +1,371 @@
+#!/usr/bin/python
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+# STARTREMOVE (downstream)
+DOCUMENTATION = r"""
+module: openshift_adm_migrate_template_instances
+short_description: Update TemplateInstances to point to the latest group-version-kinds
+version_added: "2.2.0"
+author: Alina Buzachis (@alinabuzachis)
+description:
+ - Update TemplateInstances to point to the latest group-version-kinds.
+ - Analogous to C(oc adm migrate template-instances).
+extends_documentation_fragment:
+ - kubernetes.core.k8s_auth_options
+ - kubernetes.core.k8s_wait_options
+options:
+ namespace:
+ description:
+ - The namespace that the template can be found in.
+      - If no namespace is specified, migrate objects in all namespaces.
+ type: str
+requirements:
+ - python >= 3.6
+ - kubernetes >= 12.0.0
+"""
+
+EXAMPLES = r"""
+ - name: Migrate TemplateInstances in namespace=test
+ community.okd.openshift_adm_migrate_template_instances:
+ namespace: test
+ register: _result
+
+ - name: Migrate TemplateInstances in all namespaces
+ community.okd.openshift_adm_migrate_template_instances:
+ register: _result
+"""
+
+RETURN = r"""
+result:
+ description:
+ - List with all TemplateInstances that have been migrated.
+ type: list
+ returned: success
+ elements: dict
+ sample: [
+ {
+ "apiVersion": "template.openshift.io/v1",
+ "kind": "TemplateInstance",
+ "metadata": {
+ "creationTimestamp": "2021-11-10T11:12:09Z",
+ "finalizers": [
+ "template.openshift.io/finalizer"
+ ],
+ "managedFields": [
+ {
+ "apiVersion": "template.openshift.io/v1",
+ "fieldsType": "FieldsV1",
+ "fieldsV1": {
+ "f:spec": {
+ "f:template": {
+ "f:metadata": {
+ "f:name": {}
+ },
+ "f:objects": {},
+ "f:parameters": {}
+ }
+ }
+ },
+ "manager": "kubectl-create",
+ "operation": "Update",
+ "time": "2021-11-10T11:12:09Z"
+ },
+ {
+ "apiVersion": "template.openshift.io/v1",
+ "fieldsType": "FieldsV1",
+ "fieldsV1": {
+ "f:metadata": {
+ "f:finalizers": {
+ ".": {},
+ "v:\"template.openshift.io/finalizer\"": {}
+ }
+ },
+ "f:status": {
+ "f:conditions": {}
+ }
+ },
+ "manager": "openshift-controller-manager",
+ "operation": "Update",
+ "time": "2021-11-10T11:12:09Z"
+ },
+ {
+ "apiVersion": "template.openshift.io/v1",
+ "fieldsType": "FieldsV1",
+ "fieldsV1": {
+ "f:status": {
+ "f:objects": {}
+ }
+ },
+ "manager": "OpenAPI-Generator",
+ "operation": "Update",
+ "time": "2021-11-10T11:12:33Z"
+ }
+ ],
+ "name": "demo",
+ "namespace": "test",
+ "resourceVersion": "545370",
+ "uid": "09b795d7-7f07-4d94-bf0f-2150ee66f88d"
+ },
+ "spec": {
+ "requester": {
+ "groups": [
+ "system:masters",
+ "system:authenticated"
+ ],
+ "username": "system:admin"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "name": "template"
+ },
+ "objects": [
+ {
+ "apiVersion": "v1",
+ "kind": "Secret",
+ "metadata": {
+ "labels": {
+ "foo": "bar"
+ },
+ "name": "secret"
+ }
+ },
+ {
+ "apiVersion": "apps/v1",
+ "kind": "Deployment",
+ "metadata": {
+ "name": "deployment"
+ },
+ "spec": {
+ "replicas": 0,
+ "selector": {
+ "matchLabels": {
+ "key": "value"
+ }
+ },
+ "template": {
+ "metadata": {
+ "labels": {
+ "key": "value"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "image": "k8s.gcr.io/e2e-test-images/agnhost:2.32",
+ "name": "hello-openshift"
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Route",
+ "metadata": {
+ "name": "route"
+ },
+ "spec": {
+ "to": {
+ "name": "foo"
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "NAME",
+ "value": "${NAME}"
+ }
+ ]
+ }
+ },
+ "status": {
+ "conditions": [
+ {
+ "lastTransitionTime": "2021-11-10T11:12:09Z",
+ "message": "",
+ "reason": "Created",
+ "status": "True",
+ "type": "Ready"
+ }
+ ],
+ "objects": [
+ {
+ "ref": {
+ "apiVersion": "v1",
+ "kind": "Secret",
+ "name": "secret",
+ "namespace": "test",
+ "uid": "33fad364-6d47-4f9c-9e51-92cba5602a57"
+ }
+ },
+ {
+ "ref": {
+ "apiVersion": "apps/v1",
+ "kind": "Deployment",
+ "name": "deployment",
+ "namespace": "test",
+ "uid": "3b527f88-42a1-4811-9e2f-baad4e4d8807"
+ }
+ },
+ {
+ "ref": {
+ "apiVersion": "route.openshift.io/v1.Route",
+ "kind": "Route",
+ "name": "route",
+ "namespace": "test",
+ "uid": "5b5411de-8769-4e27-ba52-6781630e4008"
+ }
+ }
+ ]
+ }
+ },
+ ...
+ ]
+"""
+# ENDREMOVE (downstream)
+
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.okd.plugins.module_utils.openshift_common import AnsibleOpenshiftModule
+
+try:
+ from kubernetes.dynamic.exceptions import DynamicApiError
+except ImportError:
+ pass
+
+from ansible_collections.kubernetes.core.plugins.module_utils.args_common import (
+ AUTH_ARG_SPEC,
+ WAIT_ARG_SPEC,
+)
+
+transforms = {
+ "Build": "build.openshift.io/v1",
+ "BuildConfig": "build.openshift.io/v1",
+ "DeploymentConfig": "apps.openshift.io/v1",
+ "Route": "route.openshift.io/v1",
+}
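+
+# Illustrative (assumed example object): a TemplateInstance status entry
+# recorded as {"ref": {"kind": "Route", "apiVersion": "v1", ...}} is
+# rewritten by perform_migrations() below to
+# {"ref": {"kind": "Route", "apiVersion": "route.openshift.io/v1", ...}}.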
+
+
+class OpenShiftMigrateTemplateInstances(AnsibleOpenshiftModule):
+ def __init__(self, **kwargs):
+ super(OpenShiftMigrateTemplateInstances, self).__init__(**kwargs)
+
+ def patch_template_instance(self, resource, templateinstance):
+ result = None
+
+ try:
+ result = resource.status.patch(templateinstance)
+ except Exception as exc:
+ self.fail_json(
+ msg="Failed to migrate TemplateInstance {0} due to: {1}".format(
+ templateinstance["metadata"]["name"], to_native(exc)
+ )
+ )
+
+ return result.to_dict()
+
+ @staticmethod
+ def perform_migrations(templateinstances):
+ ti_list = []
+ ti_to_be_migrated = []
+
+ ti_list = (
+ templateinstances.get("kind") == "TemplateInstanceList"
+ and templateinstances.get("items")
+ or [templateinstances]
+ )
+
+ for ti_elem in ti_list:
+ objects = ti_elem["status"].get("objects")
+ if objects:
+ for i, obj in enumerate(objects):
+ object_type = obj["ref"]["kind"]
+ if (
+ object_type in transforms.keys()
+ and obj["ref"].get("apiVersion") != transforms[object_type]
+ ):
+ ti_elem["status"]["objects"][i]["ref"][
+ "apiVersion"
+ ] = transforms[object_type]
+ ti_to_be_migrated.append(ti_elem)
+
+ return ti_to_be_migrated
+
+ def execute_module(self):
+ templateinstances = None
+ namespace = self.params.get("namespace")
+ results = {"changed": False, "result": []}
+
+ resource = self.find_resource(
+ "templateinstances", "template.openshift.io/v1", fail=True
+ )
+
+ if namespace:
+ # Get TemplateInstances from a provided namespace
+ try:
+ templateinstances = resource.get(namespace=namespace).to_dict()
+ except DynamicApiError as exc:
+ self.fail_json(
+ msg="Failed to retrieve TemplateInstances in namespace '{0}': {1}".format(
+ namespace, exc.body
+ ),
+ error=exc.status,
+ status=exc.status,
+ reason=exc.reason,
+ )
+ except Exception as exc:
+ self.fail_json(
+ msg="Failed to retrieve TemplateInstances in namespace '{0}': {1}".format(
+ namespace, to_native(exc)
+ ),
+ error="",
+ status="",
+ reason="",
+ )
+ else:
+ # Get TemplateInstances from all namespaces
+ templateinstances = resource.get().to_dict()
+
+ ti_to_be_migrated = self.perform_migrations(templateinstances)
+
+ if ti_to_be_migrated:
+ if self.check_mode:
+ self.exit_json(
+ **{"changed": True, "result": ti_to_be_migrated}
+ )
+ else:
+ for ti_elem in ti_to_be_migrated:
+ results["result"].append(
+ self.patch_template_instance(resource, ti_elem)
+ )
+ results["changed"] = True
+
+ self.exit_json(**results)
+
+
+def argspec():
+ argument_spec = {}
+ argument_spec.update(AUTH_ARG_SPEC)
+ argument_spec.update(WAIT_ARG_SPEC)
+ argument_spec["namespace"] = dict(type="str")
+
+ return argument_spec
+
+
+def main():
+ argument_spec = argspec()
+ module = OpenShiftMigrateTemplateInstances(argument_spec=argument_spec, supports_check_mode=True)
+ module.run_module()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/okd/plugins/modules/openshift_adm_prune_auth.py b/ansible_collections/community/okd/plugins/modules/openshift_adm_prune_auth.py
new file mode 100644
index 000000000..a9833fa50
--- /dev/null
+++ b/ansible_collections/community/okd/plugins/modules/openshift_adm_prune_auth.py
@@ -0,0 +1,132 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Red Hat
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+# STARTREMOVE (downstream)
+DOCUMENTATION = r'''
+
+module: openshift_adm_prune_auth
+
+short_description: Removes references to the specified roles, clusterroles, users, and groups
+
+version_added: "2.2.0"
+
+author:
+ - Aubin Bikouo (@abikouo)
+
+description:
+  - This module allows administrators to remove references to the specified roles, clusterroles, users, and groups.
+ - Analogous to C(oc adm prune auth).
+
+extends_documentation_fragment:
+ - kubernetes.core.k8s_auth_options
+
+options:
+ resource:
+ description:
+ - The specified resource to remove.
+ choices:
+ - roles
+ - clusterroles
+ - users
+ - groups
+ type: str
+ required: True
+ name:
+ description:
+ - Use to specify an object name to remove.
+ - Mutually exclusive with option I(label_selectors).
+      - If neither I(name) nor I(label_selectors) is specified, all resources of the given type in the namespace are pruned.
+ type: str
+ namespace:
+ description:
+ - Use to specify an object namespace.
+ - Ignored when I(resource) is set to C(clusterroles).
+ type: str
+ label_selectors:
+ description:
+ - Selector (label query) to filter on.
+ - Mutually exclusive with option I(name).
+ type: list
+ elements: str
+
+requirements:
+ - python >= 3.6
+ - kubernetes >= 12.0.0
+'''
+
+EXAMPLES = r'''
+- name: Prune all roles from testing namespace
+ openshift_adm_prune_auth:
+ resource: roles
+ namespace: testing
+
+- name: Prune roles from testing namespace using label selectors
+ openshift_adm_prune_auth:
+ resource: roles
+ namespace: testing
+ label_selectors:
+ - phase=production
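+
+# Illustrative example (role name is hypothetical): prune references to a
+# single named role.
+- name: Prune a single role by name
+  openshift_adm_prune_auth:
+    resource: roles
+    namespace: testing
+    name: my-role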
+'''
+
+
+RETURN = r'''
+cluster_role_binding:
+  type: list
+  description: List of cluster role bindings deleted.
+  returned: always
+role_binding:
+  type: list
+  description: List of role bindings deleted.
+  returned: I(resource=users) or I(resource=groups) or I(resource=clusterroles)
+security_context_constraints:
+  type: list
+  description: List of Security Context Constraints updated.
+  returned: I(resource=users) or I(resource=groups)
+authorization:
+  type: list
+  description: List of OAuthClientAuthorizations deleted.
+  returned: I(resource=users)
+group:
+  type: list
+  description: List of groups updated.
+  returned: I(resource=users)
+'''
+# ENDREMOVE (downstream)
+
+import copy
+
+from ansible_collections.kubernetes.core.plugins.module_utils.args_common import AUTH_ARG_SPEC
+
+
+def argument_spec():
+ args = copy.deepcopy(AUTH_ARG_SPEC)
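+    # Deep-copy the shared auth spec so the update below does not mutate the
+    # spec object reused by other modules.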
+ args.update(
+ dict(
+ resource=dict(type='str', required=True, choices=['roles', 'clusterroles', 'users', 'groups']),
+ namespace=dict(type='str'),
+ name=dict(type='str'),
+ label_selectors=dict(type='list', elements='str'),
+ )
+ )
+ return args
+
+
+def main():
+
+ from ansible_collections.community.okd.plugins.module_utils.openshift_adm_prune_auth import (
+ OpenShiftAdmPruneAuth)
+
+ module = OpenShiftAdmPruneAuth(argument_spec=argument_spec(),
+ mutually_exclusive=[("name", "label_selectors")],
+ supports_check_mode=True)
+ module.run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/okd/plugins/modules/openshift_adm_prune_builds.py b/ansible_collections/community/okd/plugins/modules/openshift_adm_prune_builds.py
new file mode 100644
index 000000000..b0b831e6f
--- /dev/null
+++ b/ansible_collections/community/okd/plugins/modules/openshift_adm_prune_builds.py
@@ -0,0 +1,124 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Red Hat
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+# STARTREMOVE (downstream)
+DOCUMENTATION = r'''
+
+module: openshift_adm_prune_builds
+
+short_description: Prune old completed and failed builds
+
+version_added: "2.3.0"
+
+author:
+ - Aubin Bikouo (@abikouo)
+
+description:
+  - This module allows administrators to delete old completed and failed builds.
+ - Analogous to C(oc adm prune builds).
+
+extends_documentation_fragment:
+ - kubernetes.core.k8s_auth_options
+
+options:
+ namespace:
+ description:
+ - Use to specify namespace for builds to be deleted.
+ type: str
+ keep_younger_than:
+ description:
+ - Specify the minimum age (in minutes) of a Build for it to be considered a candidate for pruning.
+ type: int
+ orphans:
+ description:
+ - If C(true), prune all builds whose associated BuildConfig no longer exists and whose status is
+ complete, failed, error, or cancelled.
+ type: bool
+ default: False
+
+requirements:
+ - python >= 3.6
+ - kubernetes >= 12.0.0
+'''
+
+EXAMPLES = r'''
+# Delete old completed and failed builds, including orphan builds whose
+# associated BuildConfig no longer exists
+- name: Delete orphan builds
+ community.okd.openshift_adm_prune_builds:
+ orphans: True
+
+# Delete old completed and failed builds, keeping those younger than 2 hours
+- name: Delete builds, keeping those younger than 2 hours
+ community.okd.openshift_adm_prune_builds:
+ keep_younger_than: 120
+
+# Delete builds from a specific namespace
+- name: Delete builds from namespace
+ community.okd.openshift_adm_prune_builds:
+ namespace: testing_namespace
+'''
+
+RETURN = r'''
+builds:
+ description:
+    - The builds that were deleted.
+ returned: success
+ type: complex
+ contains:
+ api_version:
+ description: The versioned schema of this representation of an object.
+ returned: success
+ type: str
+ kind:
+ description: Represents the REST resource this object represents.
+ returned: success
+ type: str
+ metadata:
+ description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
+ returned: success
+ type: dict
+ spec:
+ description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
+ returned: success
+ type: dict
+ status:
+ description: Current status details for the object.
+ returned: success
+ type: dict
+'''
+# ENDREMOVE (downstream)
+
+import copy
+
+from ansible_collections.kubernetes.core.plugins.module_utils.args_common import AUTH_ARG_SPEC
+
+
+def argument_spec():
+ args = copy.deepcopy(AUTH_ARG_SPEC)
+ args.update(
+ dict(
+ namespace=dict(type='str'),
+ keep_younger_than=dict(type='int'),
+ orphans=dict(type='bool', default=False),
+ )
+ )
+ return args
+
+
+def main():
+
+ from ansible_collections.community.okd.plugins.module_utils.openshift_builds import OpenShiftPruneBuilds
+
+ module = OpenShiftPruneBuilds(argument_spec=argument_spec(), supports_check_mode=True)
+ module.run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/okd/plugins/modules/openshift_adm_prune_deployments.py b/ansible_collections/community/okd/plugins/modules/openshift_adm_prune_deployments.py
new file mode 100644
index 000000000..bdef18460
--- /dev/null
+++ b/ansible_collections/community/okd/plugins/modules/openshift_adm_prune_deployments.py
@@ -0,0 +1,100 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Red Hat
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+# STARTREMOVE (downstream)
+DOCUMENTATION = r'''
+
+module: openshift_adm_prune_deployments
+
+short_description: Remove old completed and failed deployment configs
+
+version_added: "2.2.0"
+
+author:
+ - Aubin Bikouo (@abikouo)
+
+description:
+  - This module allows administrators to remove old completed and failed deployment configs.
+ - Analogous to C(oc adm prune deployments).
+
+extends_documentation_fragment:
+ - kubernetes.core.k8s_auth_options
+
+options:
+ namespace:
+ description:
+ - Use to specify namespace for deployments to be deleted.
+ type: str
+ keep_younger_than:
+ description:
+ - Specify the minimum age (in minutes) of a deployment for it to be considered a candidate for pruning.
+ type: int
+ orphans:
+ description:
+ - If C(true), prune all deployments where the associated DeploymentConfig no longer exists,
+ the status is complete or failed, and the replica size is C(0).
+ type: bool
+ default: False
+
+requirements:
+ - python >= 3.6
+ - kubernetes >= 12.0.0
+'''
+
+EXAMPLES = r'''
+- name: Prune Deployments from testing namespace
+ community.okd.openshift_adm_prune_deployments:
+ namespace: testing
+
+- name: Prune orphaned deployments, keeping those younger than 2 hours
+ community.okd.openshift_adm_prune_deployments:
+ orphans: True
+ keep_younger_than: 120
+'''
+
+
+RETURN = r'''
+replication_controllers:
+ type: list
+  description: List of replication controllers that are candidates for pruning.
+ returned: always
+'''
+# ENDREMOVE (downstream)
+
+import copy
+
+try:
+ from ansible_collections.kubernetes.core.plugins.module_utils.args_common import AUTH_ARG_SPEC
+except ImportError:
+ pass
+
+
+def argument_spec():
+ args = copy.deepcopy(AUTH_ARG_SPEC)
+ args.update(
+ dict(
+ namespace=dict(type='str',),
+ keep_younger_than=dict(type='int',),
+ orphans=dict(type='bool', default=False),
+ )
+ )
+ return args
+
+
+def main():
+
+ from ansible_collections.community.okd.plugins.module_utils.openshift_adm_prune_deployments import (
+ OpenShiftAdmPruneDeployment)
+
+ module = OpenShiftAdmPruneDeployment(argument_spec=argument_spec(), supports_check_mode=True)
+ module.run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/okd/plugins/modules/openshift_adm_prune_images.py b/ansible_collections/community/okd/plugins/modules/openshift_adm_prune_images.py
new file mode 100644
index 000000000..d470fa871
--- /dev/null
+++ b/ansible_collections/community/okd/plugins/modules/openshift_adm_prune_images.py
@@ -0,0 +1,315 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Red Hat
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+# STARTREMOVE (downstream)
+DOCUMENTATION = r'''
+
+module: openshift_adm_prune_images
+
+short_description: Remove unreferenced images
+
+version_added: "2.2.0"
+
+author:
+ - Aubin Bikouo (@abikouo)
+
+description:
+  - This module allows administrators to remove unreferenced images.
+  - Note that if C(namespace) is specified, only images referenced by image streams in that
+    namespace are candidates for pruning, and only if they are not used or referenced by an
+    image stream in another namespace.
+ - Analogous to C(oc adm prune images).
+
+extends_documentation_fragment:
+ - kubernetes.core.k8s_auth_options
+
+options:
+ namespace:
+ description:
+ - Use to specify namespace for objects.
+ type: str
+ all_images:
+ description:
+ - Include images that were imported from external registries as candidates for pruning.
+ - If pruned, all the mirrored objects associated with them will also be removed from the integrated registry.
+ type: bool
+ default: True
+ keep_younger_than:
+ description:
+ - Specify the minimum age (in minutes) of an image and its referrers for it to be considered a candidate for pruning.
+ type: int
+ prune_over_size_limit:
+ description:
+      - Specify whether images exceeding the LimitRanges defined in their namespace
+        should be considered for pruning.
+ type: bool
+ default: False
+ registry_url:
+ description:
+ - The address to use when contacting the registry, instead of using the default value.
+ - This is useful if you can't resolve or reach the default registry but you do have an
+ alternative route that works.
+      - A particular transport protocol can be enforced using the '<scheme>://' prefix.
+ type: str
+ registry_ca_cert:
+ description:
+      - Path to a CA certificate used to contact the registry. The full certificate chain must be provided to
+ avoid certificate validation errors.
+ type: path
+ registry_validate_certs:
+ description:
+      - Whether or not to verify the registry's SSL certificates. Can also be specified via the K8S_AUTH_VERIFY_SSL
+        environment variable.
+ type: bool
+ prune_registry:
+ description:
+ - If set to I(False), the prune operation will clean up image API objects, but
+ none of the associated content in the registry is removed.
+ type: bool
+ default: True
+ ignore_invalid_refs:
+ description:
+ - If set to I(True), the pruning process will ignore all errors while parsing image references.
+ - This means that the pruning process will ignore the intended connection between the object and the referenced image.
+ - As a result an image may be incorrectly deleted as unused.
+ type: bool
+ default: False
+requirements:
+ - python >= 3.6
+ - kubernetes >= 12.0.0
+ - docker-image-py
+'''
+
+EXAMPLES = r'''
+# Prune only images and their referrers that are more than an hour old
+- name: Prune images and referrers older than an hour
+ community.okd.openshift_adm_prune_images:
+ keep_younger_than: 60
+
+# Remove images exceeding currently set limit ranges
+- name: Remove images exceeding currently set limit ranges
+ community.okd.openshift_adm_prune_images:
+ prune_over_size_limit: true
+
+# Force the insecure http protocol with the particular registry host name
+- name: Prune images using custom registry
+ community.okd.openshift_adm_prune_images:
+ registry_url: http://registry.example.org
+ registry_validate_certs: false
+'''
+
+
+RETURN = r'''
+updated_image_streams:
+ description:
+    - The image streams updated.
+ returned: success
+ type: list
+ elements: dict
+ sample: [
+ {
+ "apiVersion": "image.openshift.io/v1",
+ "kind": "ImageStream",
+ "metadata": {
+ "annotations": {
+ "openshift.io/image.dockerRepositoryCheck": "2021-12-07T07:55:30Z"
+ },
+ "creationTimestamp": "2021-12-07T07:55:30Z",
+ "generation": 1,
+ "name": "python",
+ "namespace": "images",
+ "resourceVersion": "1139215",
+ "uid": "443bad2c-9fd4-4c8f-8a24-3eca4426b07f"
+ },
+ "spec": {
+ "lookupPolicy": {
+ "local": false
+ },
+ "tags": [
+ {
+ "annotations": null,
+ "from": {
+ "kind": "DockerImage",
+ "name": "python:3.8.12"
+ },
+ "generation": 1,
+ "importPolicy": {
+ "insecure": true
+ },
+ "name": "3.8.12",
+ "referencePolicy": {
+ "type": "Source"
+ }
+ }
+ ]
+ },
+ "status": {
+ "dockerImageRepository": "image-registry.openshift-image-registry.svc:5000/images/python",
+ "publicDockerImageRepository": "default-route-openshift-image-registry.apps-crc.testing/images/python",
+ "tags": []
+ }
+ },
+ ...
+ ]
+deleted_images:
+ description:
+ - The images deleted.
+ returned: success
+ type: list
+ elements: dict
+ sample: [
+ {
+ "apiVersion": "image.openshift.io/v1",
+ "dockerImageLayers": [
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "name": "sha256:5e0b432e8ba9d9029a000e627840b98ffc1ed0c5172075b7d3e869be0df0fe9b",
+ "size": 54932878
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "name": "sha256:a84cfd68b5cea612a8343c346bfa5bd6c486769010d12f7ec86b23c74887feb2",
+ "size": 5153424
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "name": "sha256:e8b8f2315954535f1e27cd13d777e73da4a787b0aebf4241d225beff3c91cbb1",
+ "size": 10871995
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "name": "sha256:0598fa43a7e793a76c198e8d45d8810394e1cfc943b2673d7fcf5a6fdc4f45b3",
+ "size": 54567844
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "name": "sha256:83098237b6d3febc7584c1f16076a32ac01def85b0d220ab46b6ebb2d6e7d4d4",
+ "size": 196499409
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "name": "sha256:b92c73d4de9a6a8f6b96806a04857ab33cf6674f6411138603471d744f44ef55",
+ "size": 6290769
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "name": "sha256:ef9b6ee59783b84a6ec0c8b109c409411ab7c88fa8c53fb3760b5fde4eb0aa07",
+ "size": 16812698
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "name": "sha256:c1f6285e64066d36477a81a48d3c4f1dc3c03dddec9e72d97da13ba51bca0d68",
+ "size": 234
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "name": "sha256:a0ee7333301245b50eb700f96d9e13220cdc31871ec9d8e7f0ff7f03a17c6fb3",
+ "size": 2349241
+ }
+ ],
+ "dockerImageManifestMediaType": "application/vnd.docker.distribution.manifest.v2+json",
+ "dockerImageMetadata": {
+ "Architecture": "amd64",
+ "Config": {
+ "Cmd": [
+ "python3"
+ ],
+ "Env": [
+ "PATH=/usr/local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "LANG=C.UTF-8",
+ "GPG_KEY=E3FF2839C048B25C084DEBE9B26995E310250568",
+ "PYTHON_VERSION=3.8.12",
+ "PYTHON_PIP_VERSION=21.2.4",
+ "PYTHON_SETUPTOOLS_VERSION=57.5.0",
+ "PYTHON_GET_PIP_URL=https://github.com/pypa/get-pip/raw/3cb8888cc2869620f57d5d2da64da38f516078c7/public/get-pip.py",
+ "PYTHON_GET_PIP_SHA256=c518250e91a70d7b20cceb15272209a4ded2a0c263ae5776f129e0d9b5674309"
+ ],
+ "Image": "sha256:cc3a2931749afa7dede97e32edbbe3e627b275c07bf600ac05bc0dc22ef203de"
+ },
+ "Container": "b43fcf5052feb037f6d204247d51ac8581d45e50f41c6be2410d94b5c3a3453d",
+ "ContainerConfig": {
+ "Cmd": [
+ "/bin/sh",
+ "-c",
+ "#(nop) ",
+ "CMD [\"python3\"]"
+ ],
+ "Env": [
+ "PATH=/usr/local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "LANG=C.UTF-8",
+ "GPG_KEY=E3FF2839C048B25C084DEBE9B26995E310250568",
+ "PYTHON_VERSION=3.8.12",
+ "PYTHON_PIP_VERSION=21.2.4",
+ "PYTHON_SETUPTOOLS_VERSION=57.5.0",
+ "PYTHON_GET_PIP_URL=https://github.com/pypa/get-pip/raw/3cb8888cc2869620f57d5d2da64da38f516078c7/public/get-pip.py",
+ "PYTHON_GET_PIP_SHA256=c518250e91a70d7b20cceb15272209a4ded2a0c263ae5776f129e0d9b5674309"
+ ],
+ "Hostname": "b43fcf5052fe",
+ "Image": "sha256:cc3a2931749afa7dede97e32edbbe3e627b275c07bf600ac05bc0dc22ef203de"
+ },
+ "Created": "2021-12-03T01:53:41Z",
+ "DockerVersion": "20.10.7",
+ "Id": "sha256:f746089c9d02d7126bbe829f788e093853a11a7f0421049267a650d52bbcac37",
+ "Size": 347487141,
+ "apiVersion": "image.openshift.io/1.0",
+ "kind": "DockerImage"
+ },
+ "dockerImageMetadataVersion": "1.0",
+ "dockerImageReference": "python@sha256:a874dcabc74ca202b92b826521ff79dede61caca00ceab0b65024e895baceb58",
+ "kind": "Image",
+ "metadata": {
+ "annotations": {
+ "image.openshift.io/dockerLayersOrder": "ascending"
+ },
+ "creationTimestamp": "2021-12-07T07:55:30Z",
+ "name": "sha256:a874dcabc74ca202b92b826521ff79dede61caca00ceab0b65024e895baceb58",
+ "resourceVersion": "1139214",
+ "uid": "33be6ab4-af79-4f44-a0fd-4925bd473c1f"
+ }
+ },
+ ...
+ ]
+'''
+# ENDREMOVE (downstream)
+
+import copy
+
+from ansible_collections.kubernetes.core.plugins.module_utils.args_common import AUTH_ARG_SPEC
+
+
+def argument_spec():
+ args = copy.deepcopy(AUTH_ARG_SPEC)
+ args.update(
+ dict(
+ namespace=dict(type='str'),
+ all_images=dict(type='bool', default=True),
+ keep_younger_than=dict(type='int'),
+ prune_over_size_limit=dict(type='bool', default=False),
+ registry_url=dict(type='str'),
+ registry_validate_certs=dict(type='bool'),
+ registry_ca_cert=dict(type='path'),
+ prune_registry=dict(type='bool', default=True),
+ ignore_invalid_refs=dict(type='bool', default=False),
+ )
+ )
+ return args
+
+
+def main():
+
+ from ansible_collections.community.okd.plugins.module_utils.openshift_adm_prune_images import (
+ OpenShiftAdmPruneImages
+ )
+
+ module = OpenShiftAdmPruneImages(argument_spec=argument_spec(), supports_check_mode=True)
+ module.run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/okd/plugins/modules/openshift_auth.py b/ansible_collections/community/okd/plugins/modules/openshift_auth.py
new file mode 100644
index 000000000..422018cc5
--- /dev/null
+++ b/ansible_collections/community/okd/plugins/modules/openshift_auth.py
@@ -0,0 +1,392 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, KubeVirt Team <@kubevirt>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+
+module: openshift_auth
+
+short_description: Authenticate to OpenShift clusters which require an explicit login step
+
+version_added: "0.2.0"
+
+author:
+ - KubeVirt Team (@kubevirt)
+ - Fabian von Feilitzsch (@fabianvf)
+
+description:
+ - This module handles authenticating to OpenShift clusters requiring I(explicit) authentication procedures,
+ meaning ones where a client logs in (obtains an authentication token), performs API operations using said
+ token and then logs out (revokes the token).
+ - On the other hand a popular configuration for username+password authentication is one utilizing HTTP Basic
+ Auth, which does not involve any additional login/logout steps (instead login credentials can be attached
+ to each and every API call performed) and as such is handled directly by the C(k8s) module (and other
+    resource-specific modules) by utilizing the C(host), C(username) and C(password) parameters. Please
+ consult your preferred module's documentation for more details.
+
+options:
+ state:
+ description:
+ - If set to I(present) connect to the API server using the URL specified in C(host) and attempt to log in.
+ - If set to I(absent) attempt to log out by revoking the authentication token specified in C(api_key).
+ default: present
+ choices:
+ - present
+ - absent
+ type: str
+ host:
+ description:
+ - Provide a URL for accessing the API server.
+ required: true
+ type: str
+ username:
+ description:
+ - Provide a username for authenticating with the API server.
+ type: str
+ password:
+ description:
+ - Provide a password for authenticating with the API server.
+ type: str
+ ca_cert:
+ description:
+ - "Path to a CA certificate file used to verify connection to the API server. The full certificate chain
+ must be provided to avoid certificate validation errors."
+ aliases: [ ssl_ca_cert ]
+ type: path
+ validate_certs:
+ description:
+ - "Whether or not to verify the API server's SSL certificates."
+ type: bool
+ default: true
+ aliases: [ verify_ssl ]
+ api_key:
+ description:
+ - When C(state) is set to I(absent), this specifies the token to revoke.
+ type: str
+
+requirements:
+ - python >= 3.6
+ - urllib3
+ - requests
+ - requests-oauthlib
+'''
+
+EXAMPLES = r'''
+- hosts: localhost
+ module_defaults:
+ group/community.okd.okd:
+ host: https://k8s.example.com/
+ ca_cert: ca.pem
+ tasks:
+ - block:
+ # It's good practice to store login credentials in a secure vault and not
+ # directly in playbooks.
+ - include_vars: openshift_passwords.yml
+
+ - name: Log in (obtain access token)
+ community.okd.openshift_auth:
+ username: admin
+ password: "{{ openshift_admin_password }}"
+ register: openshift_auth_results
+
+ # Previous task provides the token/api_key, while all other parameters
+ # are taken from module_defaults
+ - name: Get a list of all pods from any namespace
+ kubernetes.core.k8s_info:
+ api_key: "{{ openshift_auth_results.openshift_auth.api_key }}"
+ kind: Pod
+ register: pod_list
+
+ always:
+ - name: If login succeeded, try to log out (revoke access token)
+ when: openshift_auth_results.openshift_auth.api_key is defined
+ community.okd.openshift_auth:
+ state: absent
+ api_key: "{{ openshift_auth_results.openshift_auth.api_key }}"
+'''
+
+# Returned value names need to match k8s modules parameter names, to make it
+# easy to pass returned values of openshift_auth to other k8s modules.
+# Discussion: https://github.com/ansible/ansible/pull/50807#discussion_r248827899
+RETURN = r'''
+openshift_auth:
+ description: OpenShift authentication facts.
+ returned: success
+ type: complex
+ contains:
+ api_key:
+ description: Authentication token.
+ returned: success
+ type: str
+ host:
+ description: URL for accessing the API server.
+ returned: success
+ type: str
+ ca_cert:
+ description: Path to a CA certificate file used to verify connection to the API server.
+ returned: success
+ type: str
+ validate_certs:
+ description: "Whether or not to verify the API server's SSL certificates."
+ returned: success
+ type: bool
+ username:
+ description: Username for authenticating with the API server.
+ returned: success
+ type: str
+k8s_auth:
+  description: Same as returned openshift_auth. Kept only for backwards compatibility.
+ returned: success
+ type: complex
+ contains:
+ api_key:
+ description: Authentication token.
+ returned: success
+ type: str
+ host:
+ description: URL for accessing the API server.
+ returned: success
+ type: str
+ ca_cert:
+ description: Path to a CA certificate file used to verify connection to the API server.
+ returned: success
+ type: str
+ validate_certs:
+ description: "Whether or not to verify the API server's SSL certificates."
+ returned: success
+ type: bool
+ username:
+ description: Username for authenticating with the API server.
+ returned: success
+ type: str
+'''
+
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib_parse import urlparse, parse_qs, urlencode
+from urllib.parse import urljoin
+
+from base64 import urlsafe_b64encode
+import hashlib
+
+# 3rd party imports
+try:
+ import requests
+ HAS_REQUESTS = True
+except ImportError:
+ HAS_REQUESTS = False
+
+try:
+ from requests_oauthlib import OAuth2Session
+ HAS_REQUESTS_OAUTH = True
+except ImportError:
+ HAS_REQUESTS_OAUTH = False
+
+try:
+ from urllib3.util import make_headers
+ HAS_URLLIB3 = True
+except ImportError:
+ HAS_URLLIB3 = False
+
+
+K8S_AUTH_ARG_SPEC = {
+ 'state': {
+ 'default': 'present',
+ 'choices': ['present', 'absent'],
+ },
+ 'host': {'required': True},
+ 'username': {},
+ 'password': {'no_log': True},
+ 'ca_cert': {'type': 'path', 'aliases': ['ssl_ca_cert']},
+ 'validate_certs': {
+ 'type': 'bool',
+ 'default': True,
+ 'aliases': ['verify_ssl']
+ },
+ 'api_key': {'no_log': True},
+}
+
+
+def get_oauthaccesstoken_objectname_from_token(token_name):
+
+ """
+    OpenShift converts the access token to an OAuthAccessToken resource name using the algorithm described at
+ https://github.com/openshift/console/blob/9f352ba49f82ad693a72d0d35709961428b43b93/pkg/server/server.go#L609-L613
+ """
+
+ sha256Prefix = "sha256~"
+    # str.strip() would remove any of the prefix's *characters* from both ends
+    # of the token, so remove only the literal "sha256~" prefix instead.
+    content = token_name
+    if content.startswith(sha256Prefix):
+        content = content[len(sha256Prefix):]
+
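+    # OpenShift names the token object after the URL-safe base64 of the
+    # token's SHA-256 digest (padding stripped), keeping the "sha256~" prefix.
+    # Illustrative, with a hypothetical token: "sha256~abc123" maps to
+    # "sha256~" + urlsafe_b64encode(sha256(b"abc123").digest()).rstrip(b"=").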
+ b64encoded = urlsafe_b64encode(hashlib.sha256(content.encode()).digest()).rstrip(b'=')
+ return sha256Prefix + b64encoded.decode("utf-8")
+
+
+class OpenShiftAuthModule(AnsibleModule):
+ def __init__(self):
+ AnsibleModule.__init__(
+ self,
+ argument_spec=K8S_AUTH_ARG_SPEC,
+ required_if=[
+ ('state', 'present', ['username', 'password']),
+ ('state', 'absent', ['api_key']),
+ ]
+ )
+
+ if not HAS_REQUESTS:
+ self.fail("This module requires the python 'requests' package. Try `pip install requests`.")
+
+ if not HAS_REQUESTS_OAUTH:
+ self.fail("This module requires the python 'requests-oauthlib' package. Try `pip install requests-oauthlib`.")
+
+ if not HAS_URLLIB3:
+ self.fail("This module requires the python 'urllib3' package. Try `pip install urllib3`.")
+
+ def execute_module(self):
+ state = self.params.get('state')
+ verify_ssl = self.params.get('validate_certs')
+ ssl_ca_cert = self.params.get('ca_cert')
+
+ self.auth_username = self.params.get('username')
+ self.auth_password = self.params.get('password')
+ self.auth_api_key = self.params.get('api_key')
+ self.con_host = self.params.get('host')
+
+ # python-requests takes either a bool or a path to a ca file as the 'verify' param
+ if verify_ssl and ssl_ca_cert:
+ self.con_verify_ca = ssl_ca_cert # path
+ else:
+ self.con_verify_ca = verify_ssl # bool
+
+ # Get needed info to access authorization APIs
+ self.openshift_discover()
+
+ changed = False
+ result = dict()
+ if state == 'present':
+ new_api_key = self.openshift_login()
+ result = dict(
+ host=self.con_host,
+ validate_certs=verify_ssl,
+ ca_cert=ssl_ca_cert,
+ api_key=new_api_key,
+ username=self.auth_username,
+ )
+ else:
+ changed = self.openshift_logout()
+
+ # return k8s_auth as well for backwards compatibility
+ self.exit_json(changed=changed, openshift_auth=result, k8s_auth=result)
+
+ def openshift_discover(self):
+ url = urljoin(self.con_host, '.well-known/oauth-authorization-server')
+ ret = requests.get(url, verify=self.con_verify_ca)
+
+ if ret.status_code != 200:
+ self.fail_request("Couldn't find OpenShift's OAuth API", method='GET', url=url,
+ reason=ret.reason, status_code=ret.status_code)
+
+ try:
+ oauth_info = ret.json()
+
+ self.openshift_auth_endpoint = oauth_info['authorization_endpoint']
+ self.openshift_token_endpoint = oauth_info['token_endpoint']
+ except Exception:
+ self.fail_json(msg="Something went wrong discovering OpenShift OAuth details.",
+ exception=traceback.format_exc())
+
+ def openshift_login(self):
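+        # 'openshift-challenging-client' is the OAuth client OpenShift provides
+        # for challenge-based (username/password) login flows.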
+ os_oauth = OAuth2Session(client_id='openshift-challenging-client')
+ authorization_url, state = os_oauth.authorization_url(self.openshift_auth_endpoint,
+ state="1", code_challenge_method='S256')
+ auth_headers = make_headers(basic_auth='{0}:{1}'.format(self.auth_username, self.auth_password))
+
+ # Request authorization code using basic auth credentials
+ ret = os_oauth.get(
+ authorization_url,
+ headers={'X-Csrf-Token': state, 'authorization': auth_headers.get('authorization')},
+ verify=self.con_verify_ca,
+ allow_redirects=False
+ )
+
+ if ret.status_code != 302:
+ self.fail_request("Authorization failed.", method='GET', url=authorization_url,
+ reason=ret.reason, status_code=ret.status_code)
+
+        # The Location header carries the authorization code (and echoes state); the code is exchanged for a token below.
+ qwargs = {}
+ for k, v in parse_qs(urlparse(ret.headers['Location']).query).items():
+ qwargs[k] = v[0]
+ qwargs['grant_type'] = 'authorization_code'
+
+ # Using authorization code given to us in the Location header of the previous request, request a token
+ ret = os_oauth.post(
+ self.openshift_token_endpoint,
+ headers={
+ 'Accept': 'application/json',
+ 'Content-Type': 'application/x-www-form-urlencoded',
+ # This is just base64 encoded 'openshift-challenging-client:'
+ 'Authorization': 'Basic b3BlbnNoaWZ0LWNoYWxsZW5naW5nLWNsaWVudDo='
+ },
+ data=urlencode(qwargs),
+ verify=self.con_verify_ca
+ )
+
+ if ret.status_code != 200:
+ self.fail_request("Failed to obtain an authorization token.", method='POST',
+ url=self.openshift_token_endpoint,
+ reason=ret.reason, status_code=ret.status_code)
+
+ return ret.json()['access_token']
+
+ def openshift_logout(self):
+
+ name = get_oauthaccesstoken_objectname_from_token(self.auth_api_key)
+ headers = {
+ 'Accept': 'application/json',
+ 'Content-Type': 'application/json',
+ 'Authorization': "Bearer {0}".format(self.auth_api_key)
+ }
+
+ url = "{0}/apis/oauth.openshift.io/v1/useroauthaccesstokens/{1}".format(self.con_host, name)
+ json = {
+ "apiVersion": "oauth.openshift.io/v1",
+ "kind": "DeleteOptions",
+ "gracePeriodSeconds": 0
+ }
+
+ ret = requests.delete(url, json=json, verify=self.con_verify_ca, headers=headers)
+ if ret.status_code != 200:
+ self.fail_json(
+ msg="Couldn't delete user oauth access token '{0}' due to: {1}".format(name, ret.json().get("message")),
+ status_code=ret.status_code
+ )
+
+ return True
+
+ def fail(self, msg=None):
+ self.fail_json(msg=msg)
+
+ def fail_request(self, msg, **kwargs):
+ req_info = {}
+ for k, v in kwargs.items():
+ req_info['req_' + k] = v
+ self.fail_json(msg=msg, **req_info)
+
+
+def main():
+ module = OpenShiftAuthModule()
+ try:
+ module.execute_module()
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/okd/plugins/modules/openshift_build.py b/ansible_collections/community/okd/plugins/modules/openshift_build.py
new file mode 100644
index 000000000..1259a102c
--- /dev/null
+++ b/ansible_collections/community/okd/plugins/modules/openshift_build.py
@@ -0,0 +1,260 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Red Hat
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+# STARTREMOVE (downstream)
+DOCUMENTATION = r'''
+
+module: openshift_build
+
+short_description: Start a new build or cancel running, pending, or new builds.
+
+version_added: "2.3.0"
+
+author:
+ - Aubin Bikouo (@abikouo)
+
+description:
+ - This module starts a new build from the provided build config or build name.
+  - This module can also cancel a new, pending, or running build by requesting a graceful shutdown of the build.
+    There may be a delay between requesting the cancellation and the time the build is terminated.
+  - It can also restart a build once the current one is cancelled.
+ - Analogous to C(oc cancel-build) and C(oc start-build).
+
+extends_documentation_fragment:
+ - kubernetes.core.k8s_auth_options
+
+options:
+ state:
+ description:
+      - Determines if a build should be started, cancelled, or restarted.
+ - When set to C(restarted) a new build will be created after the current build is cancelled.
+ choices:
+ - started
+ - cancelled
+ - restarted
+ default: started
+ type: str
+ build_name:
+ description:
+ - Specify the name of a build which should be re-run.
+ - Mutually exclusive with parameter I(build_config_name).
+ type: str
+ build_config_name:
+ description:
+ - Specify the name of a build config from which a new build will be run.
+ - Mutually exclusive with parameter I(build_name).
+ type: str
+ namespace:
+ description:
+ - Specify the namespace for the build or the build config.
+ type: str
+ required: True
+ build_args:
+ description:
+      - Specify a list of key-value pairs to pass to Docker during the build.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - docker build argument name.
+ type: str
+ required: true
+ value:
+ description:
+ - docker build argument value.
+ type: str
+ required: true
+ commit:
+ description:
+ - Specify the source code commit identifier the build should use;
+ requires a build based on a Git repository.
+ type: str
+ env_vars:
+ description:
+      - Specify a list of key-value pairs of environment variables to set for the build container.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - Environment variable name.
+ type: str
+ required: true
+ value:
+ description:
+ - Environment variable value.
+ type: str
+ required: true
+ incremental:
+ description:
+ - Overrides the incremental setting in a source-strategy build, ignored if not specified.
+ type: bool
+ no_cache:
+ description:
+ - Overrides the noCache setting in a docker-strategy build, ignored if not specified.
+ type: bool
+ wait:
+ description:
+ - When C(state=started), specify whether to wait for a build to complete
+ and exit with a non-zero return code if the build fails.
+ - When I(state=cancelled), specify whether to wait for a build phase to be Cancelled.
+ default: False
+ type: bool
+ wait_sleep:
+ description:
+ - Number of seconds to sleep between checks.
+ - Ignored if C(wait=false).
+ default: 5
+ type: int
+ wait_timeout:
+ description:
+ - How long in seconds to wait for a build to complete.
+ - Ignored if C(wait=false).
+ default: 120
+ type: int
+ build_phases:
+ description:
+ - List of state for build to cancel.
+ - Ignored when C(state=started).
+ type: list
+ elements: str
+ choices:
+ - New
+ - Pending
+ - Running
+ default: []
+
+requirements:
+ - python >= 3.6
+ - kubernetes >= 12.0.0
+'''
+
+EXAMPLES = r'''
+# Starts build from build config default/hello-world
+- name: Starts build from build config
+ community.okd.openshift_build:
+ namespace: default
+ build_config_name: hello-world
+
+# Starts build from a previous build "default/hello-world-1"
+- name: Starts build from a previous build
+ community.okd.openshift_build:
+ namespace: default
+ build_name: hello-world-1
+
+# Cancel the build with the given name
+- name: Cancel build from default namespace
+ community.okd.openshift_build:
+ namespace: "default"
+ build_name: ruby-build-1
+ state: cancelled
+
+# Cancel the named build and create a new one with the same parameters
+- name: Cancel build from default namespace and create a new one
+ community.okd.openshift_build:
+ namespace: "default"
+ build_name: ruby-build-1
+ state: restarted
+
+# Cancel all builds created from 'ruby-build' build configuration that are in 'new' state
+- name: Cancel all new builds from ruby-build build configuration
+ community.okd.openshift_build:
+ namespace: "default"
+ build_config_name: ruby-build
+ build_phases:
+ - New
+ state: cancelled
+'''
+
+RETURN = r'''
+builds:
+ description:
+ - The builds that were started/cancelled.
+ returned: success
+ type: complex
+ contains:
+ api_version:
+ description: The versioned schema of this representation of an object.
+ returned: success
+ type: str
+ kind:
+ description: Represents the REST resource this object represents.
+ returned: success
+ type: str
+ metadata:
+ description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
+ returned: success
+ type: dict
+ spec:
+ description: Specific attributes of the build.
+ returned: success
+ type: dict
+ status:
+ description: Current status details for the object.
+ returned: success
+ type: dict
+'''
+# ENDREMOVE (downstream)
+
+import copy
+
+from ansible_collections.kubernetes.core.plugins.module_utils.args_common import AUTH_ARG_SPEC
+
+
+def argument_spec():
+ args = copy.deepcopy(AUTH_ARG_SPEC)
+
+ args_options = dict(
+ name=dict(type='str', required=True),
+ value=dict(type='str', required=True)
+ )
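+    # build_args and env_vars are both lists of {name, value} dicts, so they
+    # share the suboption schema above.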
+
+ args.update(
+ dict(
+ state=dict(type='str', choices=['started', 'cancelled', 'restarted'], default="started"),
+ build_args=dict(type='list', elements='dict', options=args_options),
+ commit=dict(type='str'),
+ env_vars=dict(type='list', elements='dict', options=args_options),
+ build_name=dict(type='str'),
+ build_config_name=dict(type='str'),
+ namespace=dict(type='str', required=True),
+ incremental=dict(type='bool'),
+ no_cache=dict(type='bool'),
+ wait=dict(type='bool', default=False),
+ wait_sleep=dict(type='int', default=5),
+ wait_timeout=dict(type='int', default=120),
+ build_phases=dict(type='list', elements='str', default=[], choices=["New", "Pending", "Running"]),
+ )
+ )
+ return args
+
+
+def main():
+ mutually_exclusive = [
+ ('build_name', 'build_config_name'),
+ ]
+ from ansible_collections.community.okd.plugins.module_utils.openshift_builds import (
+ OpenShiftBuilds
+ )
+ module = OpenShiftBuilds(
+ argument_spec=argument_spec(),
+ mutually_exclusive=mutually_exclusive,
+ required_one_of=[
+ [
+ 'build_name',
+ 'build_config_name',
+ ]
+ ],
+ )
+ module.run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/okd/plugins/modules/openshift_import_image.py b/ansible_collections/community/okd/plugins/modules/openshift_import_image.py
new file mode 100644
index 000000000..df0588cf4
--- /dev/null
+++ b/ansible_collections/community/okd/plugins/modules/openshift_import_image.py
@@ -0,0 +1,194 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Red Hat
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+# STARTREMOVE (downstream)
+DOCUMENTATION = r'''
+
+module: openshift_import_image
+
+short_description: Import the latest image information from a tag in a container image registry.
+
+version_added: "2.2.0"
+
+author:
+ - Aubin Bikouo (@abikouo)
+
+description:
+ - Image streams allow you to control which images are rolled out to your builds and applications.
+ - This module fetches the latest version of an image from a remote repository and updates the image stream tag
+ if it does not match the previous value.
+ - Running the module multiple times will not create duplicate entries.
+ - When importing an image, only the image metadata is copied, not the image contents.
+ - Analogous to C(oc import-image).
+
+extends_documentation_fragment:
+ - kubernetes.core.k8s_auth_options
+
+options:
+ namespace:
+ description:
+ - Use to specify namespace for image stream to create/update.
+ type: str
+ required: True
+ name:
+ description:
+ - Image stream to import tag into.
+      - This can be provided as a list of image streams or a single value.
+ type: raw
+ required: True
+ all:
+ description:
+ - If set to I(true), import all tags from the provided source on creation or if C(source) is specified.
+ type: bool
+ default: False
+ validate_registry_certs:
+ description:
+      - If set to I(true), allow importing from registries that have invalid HTTPS certificates
+        or are hosted via HTTP. This parameter will take precedence over the insecure annotation.
+ type: bool
+ reference_policy:
+ description:
+ - Allow to request pullthrough for external image when set to I(local).
+ default: source
+ choices:
+ - source
+ - local
+ type: str
+ scheduled:
+ description:
+ - Set each imported Docker image to be periodically imported from a remote repository.
+ type: bool
+ default: False
+ source:
+ description:
+ - A Docker image repository to import images from.
+      - Should be provided as 'registry.io/repo/image'.
+ type: str
+
+requirements:
+ - python >= 3.6
+ - kubernetes >= 12.0.0
+ - docker-image-py
+'''
+
+EXAMPLES = r'''
+# Import tag latest into a new image stream.
+- name: Import tag latest into new image stream
+ community.okd.openshift_import_image:
+ namespace: testing
+ name: mystream
+ source: registry.io/repo/image:latest
+
+# Update imported data for tag latest in an already existing image stream.
+- name: Update imported data for tag latest
+ community.okd.openshift_import_image:
+ namespace: testing
+ name: mystream
+
+# Update imported data for tag 'stable' in an already existing image stream.
+- name: Update imported data for tag stable
+ community.okd.openshift_import_image:
+ namespace: testing
+ name: mystream:stable
+
+# Update imported data for all tags in an existing image stream.
+- name: Update imported data for all tags
+ community.okd.openshift_import_image:
+ namespace: testing
+ name: mystream
+ all: true
+
+# Import all tags into a new image stream.
+- name: Import all tags into a new image stream.
+ community.okd.openshift_import_image:
+ namespace: testing
+ name: mystream
+ source: registry.io/repo/image:latest
+ all: true
+
+# Import all tags into a new image stream for a list of image streams
+- name: Import all tags into new image streams for a list of image streams
+ community.okd.openshift_import_image:
+ namespace: testing
+ name:
+ - mystream1
+ - mystream2
+ - mystream3
+ source: registry.io/repo/image:latest
+ all: true
+'''
+
+
+RETURN = r'''
+result:
+ description:
+ - List with all ImageStreamImport that have been created.
+ type: list
+ returned: success
+ elements: dict
+ contains:
+ api_version:
+ description: The versioned schema of this representation of an object.
+ returned: success
+ type: str
+ kind:
+ description: Represents the REST resource this object represents.
+ returned: success
+ type: str
+ metadata:
+ description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
+ returned: success
+ type: dict
+ spec:
+ description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
+ returned: success
+ type: dict
+ status:
+ description: Current status details for the object.
+ returned: success
+ type: dict
+'''
+# ENDREMOVE (downstream)
+
+import copy
+
+from ansible_collections.kubernetes.core.plugins.module_utils.args_common import AUTH_ARG_SPEC
+
+
+def argument_spec():
+ args = copy.deepcopy(AUTH_ARG_SPEC)
+ args.update(
+ dict(
+ namespace=dict(type='str', required=True),
+ name=dict(type='raw', required=True),
+ all=dict(type='bool', default=False),
+ validate_registry_certs=dict(type='bool'),
+ reference_policy=dict(type='str', choices=["source", "local"], default="source"),
+ scheduled=dict(type='bool', default=False),
+ source=dict(type='str'),
+ )
+ )
+ return args
+
+
+def main():
+
+ from ansible_collections.community.okd.plugins.module_utils.openshift_import_image import (
+ OpenShiftImportImage
+ )
+
+ module = OpenShiftImportImage(
+ argument_spec=argument_spec(),
+ supports_check_mode=True
+ )
+ module.run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/okd/plugins/modules/openshift_process.py b/ansible_collections/community/okd/plugins/modules/openshift_process.py
new file mode 100644
index 000000000..fb00ffbba
--- /dev/null
+++ b/ansible_collections/community/okd/plugins/modules/openshift_process.py
@@ -0,0 +1,236 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+# Copyright (c) 2020-2021, Red Hat
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# STARTREMOVE (downstream)
+DOCUMENTATION = r'''
+module: openshift_process
+
+short_description: Process an OpenShift template.openshift.io/v1 Template
+
+version_added: "0.3.0"
+
+author: "Fabian von Feilitzsch (@fabianvf)"
+
+description:
+  - Processes a specified OpenShift template with the provided parameters.
+ - Templates can be provided inline, from a file, or specified by name and namespace in the cluster.
+ - Analogous to `oc process`.
+ - For CRUD operations on Template resources themselves, see the community.okd.k8s module.
+
+extends_documentation_fragment:
+ - kubernetes.core.k8s_auth_options
+ - kubernetes.core.k8s_wait_options
+ - kubernetes.core.k8s_resource_options
+
+requirements:
+ - "python >= 3.6"
+ - "kubernetes >= 12.0.0"
+ - "PyYAML >= 3.11"
+
+options:
+ name:
+ description:
+ - The name of the Template to process.
+ - The Template must be present in the cluster.
+ - When provided, I(namespace) is required.
+    - Mutually exclusive with I(resource_definition) or I(src).
+ type: str
+ namespace:
+ description:
+ - The namespace that the template can be found in.
+ type: str
+ namespace_target:
+ description:
+ - The namespace that resources should be created, updated, or deleted in.
+    - Only used when I(state) is present or absent.
+    type: str
+ parameters:
+ description:
+ - 'A set of key: value pairs that will be used to set/override values in the Template.'
+ - Corresponds to the `--param` argument to oc process.
+ type: dict
+ parameter_file:
+ description:
+ - A path to a file containing template parameter values to override/set values in the Template.
+ - Corresponds to the `--param-file` argument to oc process.
+ type: str
+ state:
+ description:
+ - Determines what to do with the rendered Template.
+ - The state I(rendered) will render the Template based on the provided parameters, and return the rendered
+ objects in the I(resources) field. These can then be referenced in future tasks.
+ - The state I(present) will cause the resources in the rendered Template to be created if they do not
+ already exist, and patched if they do.
+ - The state I(absent) will delete the resources in the rendered Template.
+ type: str
+ default: rendered
+ choices: [ absent, present, rendered ]
+'''
+
+EXAMPLES = r'''
+- name: Process a template in the cluster
+ community.okd.openshift_process:
+ name: nginx-example
+ namespace: openshift # only needed if using a template already on the server
+ parameters:
+ NAMESPACE: openshift
+ NAME: test123
+ state: rendered
+ register: result
+
+- name: Create the rendered resources using apply
+ community.okd.k8s:
+ namespace: default
+ definition: '{{ item }}'
+ wait: yes
+ apply: yes
+ loop: '{{ result.resources }}'
+
+- name: Process a template with parameters from an env file and create the resources
+ community.okd.openshift_process:
+ name: nginx-example
+ namespace: openshift
+ namespace_target: default
+ parameter_file: 'files/nginx.env'
+ state: present
+ wait: yes
+
+- name: Process a local template and create the resources
+ community.okd.openshift_process:
+ src: files/example-template.yaml
+ parameter_file: files/example.env
+ namespace_target: default
+ state: present
+
+- name: Process a local template, delete the resources, and wait for them to terminate
+ community.okd.openshift_process:
+ src: files/example-template.yaml
+ parameter_file: files/example.env
+ namespace_target: default
+ state: absent
+ wait: yes
+'''
+
+RETURN = r'''
+result:
+ description:
+ - The created, patched, or otherwise present object. Will be empty in the case of a deletion.
+ returned: on success when state is present or absent
+ type: complex
+ contains:
+ apiVersion:
+ description: The versioned schema of this representation of an object.
+ returned: success
+ type: str
+ kind:
+ description: Represents the REST resource this object represents.
+ returned: success
+ type: str
+ metadata:
+ description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
+ returned: success
+ type: complex
+ contains:
+ name:
+ description: The name of the resource
+ type: str
+ namespace:
+ description: The namespace of the resource
+ type: str
+ spec:
+ description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
+ returned: success
+ type: dict
+ status:
+ description: Current status details for the object.
+ returned: success
+ type: complex
+ contains:
+ conditions:
+ type: complex
+ description: Array of status conditions for the object. Not guaranteed to be present
+ items:
+ description: Returned only when multiple yaml documents are passed to src or resource_definition
+ returned: when resource_definition or src contains list of objects
+ type: list
+ duration:
+  description: Elapsed time of the task in seconds.
+ returned: when C(wait) is true
+ type: int
+ sample: 48
+resources:
+ type: complex
+ description:
+ - The rendered resources defined in the Template
+ returned: on success when state is rendered
+ contains:
+ apiVersion:
+ description: The versioned schema of this representation of an object.
+ returned: success
+ type: str
+ kind:
+ description: Represents the REST resource this object represents.
+ returned: success
+ type: str
+ metadata:
+ description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
+ returned: success
+ type: complex
+ contains:
+ name:
+ description: The name of the resource
+ type: str
+ namespace:
+ description: The namespace of the resource
+ type: str
+ spec:
+ description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
+ returned: success
+ type: dict
+ status:
+ description: Current status details for the object.
+ returned: success
+ type: dict
+ contains:
+ conditions:
+ type: complex
+ description: Array of status conditions for the object. Not guaranteed to be present
+'''
+# ENDREMOVE (downstream)
+
+from ansible_collections.kubernetes.core.plugins.module_utils.args_common import (
+ AUTH_ARG_SPEC, RESOURCE_ARG_SPEC, WAIT_ARG_SPEC
+)
+
+
+def argspec():
+ argument_spec = {}
+ argument_spec.update(AUTH_ARG_SPEC)
+ argument_spec.update(WAIT_ARG_SPEC)
+ argument_spec.update(RESOURCE_ARG_SPEC)
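+    # The auth, wait, and resource argument fragments mirror the documentation
+    # fragments this module extends; module-specific options are layered on top.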
+ argument_spec['state'] = dict(type='str', default='rendered', choices=['present', 'absent', 'rendered'])
+ argument_spec['namespace'] = dict(type='str')
+ argument_spec['namespace_target'] = dict(type='str')
+ argument_spec['parameters'] = dict(type='dict')
+ argument_spec['name'] = dict(type='str')
+ argument_spec['parameter_file'] = dict(type='str')
+
+ return argument_spec
+
+
+def main():
+
+ from ansible_collections.community.okd.plugins.module_utils.openshift_process import (
+ OpenShiftProcess)
+
+ module = OpenShiftProcess(argument_spec=argspec(), supports_check_mode=True)
+ module.run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/okd/plugins/modules/openshift_registry_info.py b/ansible_collections/community/okd/plugins/modules/openshift_registry_info.py
new file mode 100644
index 000000000..a455ac50b
--- /dev/null
+++ b/ansible_collections/community/okd/plugins/modules/openshift_registry_info.py
@@ -0,0 +1,114 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Red Hat
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+# STARTREMOVE (downstream)
+DOCUMENTATION = r'''
+
+module: openshift_registry_info
+
+short_description: Display information about the integrated registry.
+
+version_added: "2.2.0"
+
+author:
+ - Aubin Bikouo (@abikouo)
+
+description:
+ - This module exposes information about the integrated registry.
+ - Use C(check) to verify your local client can access the registry.
+  - If the administrator has not configured a public hostname for the registry then
+    this module may fail when run outside of the cluster.
+ - Analogous to C(oc registry info).
+
+extends_documentation_fragment:
+ - kubernetes.core.k8s_auth_options
+
+options:
+ check:
+ description:
+ - Attempt to contact the integrated registry using local client.
+ type: bool
+ default: False
+
+requirements:
+ - python >= 3.6
+ - kubernetes >= 12.0.0
+ - docker-image-py
+'''
+
+EXAMPLES = r'''
+# Get registry information
+- name: Read integrated registry information
+ community.okd.openshift_registry_info:
+
+# Read integrated registry information and attempt to contact it using the local client.
+- name: Attempt to contact integrated registry using local client
+ community.okd.openshift_registry_info:
+ check: yes
+'''
+
+
+RETURN = r'''
+internal_hostname:
+ description:
+ - The internal registry hostname.
+ type: str
+ returned: success
+public_hostname:
+ description:
+ - The public registry hostname.
+ type: str
+ returned: success
+check:
+ description:
+    - Whether or not the local client can contact the registry.
+ type: dict
+ returned: success
+ contains:
+ reached:
+ description: Whether the registry has been reached or not.
+ returned: success
+      type: bool
+ msg:
+      description: Message describing the result of the ping operation.
+ returned: always
+ type: str
+'''
+# ENDREMOVE (downstream)
+
+import copy
+
+from ansible_collections.kubernetes.core.plugins.module_utils.args_common import AUTH_ARG_SPEC
+
+
+def argument_spec():
+ args = copy.deepcopy(AUTH_ARG_SPEC)
+ args.update(
+ dict(
+ check=dict(type='bool', default=False)
+ )
+ )
+ return args
+
+
+def main():
+
+ from ansible_collections.community.okd.plugins.module_utils.openshift_registry import (
+ OpenShiftRegistry
+ )
+
+ module = OpenShiftRegistry(
+ argument_spec=argument_spec(),
+ supports_check_mode=True
+ )
+ module.run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/okd/plugins/modules/openshift_route.py b/ansible_collections/community/okd/plugins/modules/openshift_route.py
new file mode 100644
index 000000000..e452fc534
--- /dev/null
+++ b/ansible_collections/community/okd/plugins/modules/openshift_route.py
@@ -0,0 +1,542 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020, Red Hat
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+# STARTREMOVE (downstream)
+DOCUMENTATION = r'''
+module: openshift_route
+
+short_description: Expose a Service as an OpenShift Route.
+
+version_added: "0.3.0"
+
+author: "Fabian von Feilitzsch (@fabianvf)"
+
+description:
+ - Looks up a Service and creates a new Route based on it.
+ - Analogous to `oc expose` and `oc create route` for creating Routes, but does not support creating Services.
+ - For creating Services from other resources, see kubernetes.core.k8s.
+
+extends_documentation_fragment:
+ - kubernetes.core.k8s_auth_options
+ - kubernetes.core.k8s_wait_options
+ - kubernetes.core.k8s_state_options
+
+requirements:
+ - "python >= 3.6"
+ - "kubernetes >= 12.0.0"
+ - "PyYAML >= 3.11"
+
+options:
+ service:
+ description:
+ - The name of the service to expose.
+ - Required when I(state) is not absent.
+ type: str
+ aliases: ['svc']
+ namespace:
+ description:
+ - The namespace of the resource being targeted.
+ - The Route will be created in this namespace as well.
+ required: yes
+ type: str
+ labels:
+ description:
+ - Specify the labels to apply to the created Route.
+ - 'A set of key: value pairs.'
+ type: dict
+ annotations:
+ description:
+ - Specify the Route Annotations.
+ - 'A set of key: value pairs.'
+ type: dict
+ version_added: "2.1.0"
+ name:
+ description:
+ - The desired name of the Route to be created.
+      - Defaults to the value of I(service).
+ type: str
+ hostname:
+ description:
+ - The hostname for the Route.
+ type: str
+ path:
+ description:
+      - The path for the Route.
+ type: str
+ wildcard_policy:
+ description:
+ - The wildcard policy for the hostname.
+ - Currently only Subdomain is supported.
+ - If not provided, the default of None will be used.
+ choices:
+ - Subdomain
+ type: str
+ port:
+ description:
+ - Name or number of the port the Route will route traffic to.
+ type: str
+ tls:
+ description:
+ - TLS configuration for the newly created route.
+ - Only used when I(termination) is set.
+ type: dict
+ suboptions:
+ ca_certificate:
+ description:
+ - Path to a CA certificate file on the target host.
+ - Not supported when I(termination) is set to passthrough.
+ type: str
+ certificate:
+ description:
+ - Path to a certificate file on the target host.
+ - Not supported when I(termination) is set to passthrough.
+ type: str
+ destination_ca_certificate:
+ description:
+ - Path to a CA certificate file used for securing the connection.
+ - Only used when I(termination) is set to reencrypt.
+ - Defaults to the Service CA.
+ type: str
+ key:
+ description:
+ - Path to a key file on the target host.
+ - Not supported when I(termination) is set to passthrough.
+ type: str
+ insecure_policy:
+ description:
+ - Sets the InsecureEdgeTerminationPolicy for the Route.
+ - Not supported when I(termination) is set to reencrypt.
+ - When I(termination) is set to passthrough, only redirect is supported.
+ - If not provided, insecure traffic will be disallowed.
+ type: str
+ choices:
+ - allow
+ - redirect
+ - disallow
+ default: disallow
+ termination:
+ description:
+ - The termination type of the Route.
+      - If left empty, no termination type will be set, and the route will be insecure.
+      - When set to insecure, I(tls) will be ignored.
+ choices:
+ - edge
+ - passthrough
+ - reencrypt
+ - insecure
+ default: insecure
+ type: str
+'''
+
+EXAMPLES = r'''
+- name: Create hello-world deployment
+ community.okd.k8s:
+ definition:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: hello-kubernetes
+ namespace: default
+ spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ app: hello-kubernetes
+ template:
+ metadata:
+ labels:
+ app: hello-kubernetes
+ spec:
+ containers:
+ - name: hello-kubernetes
+ image: paulbouwer/hello-kubernetes:1.8
+ ports:
+ - containerPort: 8080
+
+- name: Create Service for the hello-world deployment
+ community.okd.k8s:
+ definition:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: hello-kubernetes
+ namespace: default
+ spec:
+ ports:
+ - port: 80
+ targetPort: 8080
+ selector:
+ app: hello-kubernetes
+
+- name: Expose the insecure hello-world service externally
+ community.okd.openshift_route:
+ service: hello-kubernetes
+ namespace: default
+ insecure_policy: allow
+ annotations:
+ haproxy.router.openshift.io/balance: roundrobin
+ register: route
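+
+# Illustrative sketch: the hostname and certificate paths below are
+# hypothetical and must exist on the target host.
+- name: Expose the service with edge TLS termination, redirecting insecure traffic
+  community.okd.openshift_route:
+    service: hello-kubernetes
+    namespace: default
+    termination: edge
+    hostname: hello.apps.example.com
+    tls:
+      certificate: /etc/pki/tls/certs/hello.crt
+      key: /etc/pki/tls/private/hello.key
+      insecure_policy: redirect
+    wait: yes
+  register: secure_route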
+'''
+
+RETURN = r'''
+result:
+ description:
+ - The Route object that was created or updated. Will be empty in the case of deletion.
+ returned: success
+ type: complex
+ contains:
+ apiVersion:
+ description: The versioned schema of this representation of an object.
+ returned: success
+ type: str
+ kind:
+      description: The REST resource this object represents.
+ returned: success
+ type: str
+ metadata:
+ description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
+ returned: success
+ type: complex
+ contains:
+      name:
+        description: The name of the created Route.
+        type: str
+      namespace:
+        description: The namespace of the created Route.
+        type: str
+ spec:
+    description: Specification for the Route.
+ returned: success
+ type: complex
+ contains:
+ host:
+ description: Host is an alias/DNS that points to the service.
+ type: str
+ path:
+          description: Path that the router watches to route traffic to the service.
+ type: str
+ port:
+ description: Defines a port mapping from a router to an endpoint in the service endpoints.
+ type: complex
+ contains:
+ targetPort:
+ description: The target port on pods selected by the service this route points to.
+ type: str
+ tls:
+ description: Defines config used to secure a route and provide termination.
+ type: complex
+ contains:
+ caCertificate:
+ description: Provides the cert authority certificate contents.
+ type: str
+ certificate:
+ description: Provides certificate contents.
+ type: str
+ destinationCACertificate:
+ description: Provides the contents of the ca certificate of the final destination.
+ type: str
+ insecureEdgeTerminationPolicy:
+ description: Indicates the desired behavior for insecure connections to a route.
+ type: str
+ key:
+ description: Provides key file contents.
+ type: str
+ termination:
+ description: Indicates termination type.
+ type: str
+ to:
+          description: Specifies the target that resolves into endpoints.
+ type: complex
+ contains:
+ kind:
+ description: The kind of target that the route is referring to. Currently, only 'Service' is allowed.
+ type: str
+ name:
+                description: Name of the service/target that is being referred to, e.g. the name of the Service.
+ type: str
+ weight:
+ description: Specifies the target's relative weight against other target reference objects.
+ type: int
+ wildcardPolicy:
+ description: Wildcard policy if any for the route.
+ type: str
+ status:
+    description: Current status details for the Route.
+ returned: success
+ type: complex
+ contains:
+ ingress:
+ description: List of places where the route may be exposed.
+ type: complex
+ contains:
+ conditions:
+ description: Array of status conditions for the Route ingress.
+ type: complex
+ contains:
+ type:
+                  description: The type of the condition. Currently only 'Admitted'.
+ type: str
+ status:
+ description: The status of the condition. Can be True, False, Unknown.
+ type: str
+ host:
+ description: The host string under which the route is exposed.
+ type: str
+ routerCanonicalHostname:
+ description: The external host name for the router that can be used as a CNAME for the host requested for this route. May not be set.
+ type: str
+ routerName:
+ description: A name chosen by the router to identify itself.
+ type: str
+ wildcardPolicy:
+ description: The wildcard policy that was allowed where this route is exposed.
+ type: str
+duration:
+  description: Elapsed time of the task in seconds.
+ returned: when C(wait) is true
+ type: int
+ sample: 48
+'''
+# ENDREMOVE (downstream)
+
+import copy
+
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.okd.plugins.module_utils.openshift_common import AnsibleOpenshiftModule
+
+try:
+ from ansible_collections.kubernetes.core.plugins.module_utils.k8s.runner import perform_action
+ from ansible_collections.kubernetes.core.plugins.module_utils.k8s.waiter import Waiter
+ from ansible_collections.kubernetes.core.plugins.module_utils.args_common import (
+ AUTH_ARG_SPEC, WAIT_ARG_SPEC, COMMON_ARG_SPEC
+ )
+except ImportError:
+    # kubernetes.core is unavailable; fall back to empty specs so this module
+    # can still be imported and report the missing dependency at runtime.
+    AUTH_ARG_SPEC = WAIT_ARG_SPEC = COMMON_ARG_SPEC = {}
+
+try:
+ from kubernetes.dynamic.exceptions import DynamicApiError, NotFoundError
+except ImportError:
+ pass
+
+
+class OpenShiftRoute(AnsibleOpenshiftModule):
+
+ def __init__(self):
+ super(OpenShiftRoute, self).__init__(
+ argument_spec=self.argspec,
+ supports_check_mode=True,
+ )
+
+ self.append_hash = False
+ self.apply = False
+ self.warnings = []
+ self.params['merge_type'] = None
+
+ @property
+ def argspec(self):
+ spec = copy.deepcopy(AUTH_ARG_SPEC)
+ spec.update(copy.deepcopy(WAIT_ARG_SPEC))
+ spec.update(copy.deepcopy(COMMON_ARG_SPEC))
+
+ spec['service'] = dict(type='str', aliases=['svc'])
+ spec['namespace'] = dict(required=True, type='str')
+ spec['labels'] = dict(type='dict')
+ spec['name'] = dict(type='str')
+ spec['hostname'] = dict(type='str')
+ spec['path'] = dict(type='str')
+ spec['wildcard_policy'] = dict(choices=['Subdomain'], type='str')
+ spec['port'] = dict(type='str')
+ spec['tls'] = dict(type='dict', options=dict(
+ ca_certificate=dict(type='str'),
+ certificate=dict(type='str'),
+ destination_ca_certificate=dict(type='str'),
+ key=dict(type='str', no_log=False),
+ insecure_policy=dict(type='str', choices=['allow', 'redirect', 'disallow'], default='disallow'),
+ ))
+ spec['termination'] = dict(choices=['edge', 'passthrough', 'reencrypt', 'insecure'], default='insecure')
+ spec['annotations'] = dict(type='dict')
+
+ return spec
+
+ def execute_module(self):
+
+ service_name = self.params.get('service')
+ namespace = self.params['namespace']
+ termination_type = self.params.get('termination')
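+        # 'insecure' is only a user-facing convenience; the Route API expresses
+        # it as the absence of a TLS block, so normalize it to None here.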
+ if termination_type == 'insecure':
+ termination_type = None
+ state = self.params.get('state')
+
+ if state != 'absent' and not service_name:
+ self.fail_json("If 'state' is not 'absent' then 'service' must be provided")
+
+ # We need to do something a little wonky to wait if the user doesn't supply a custom condition
+ custom_wait = self.params.get('wait') and not self.params.get('wait_condition') and state != 'absent'
+ if custom_wait:
+ # Don't use default wait logic in perform_action
+ self.params['wait'] = False
+
+ route_name = self.params.get('name') or service_name
+ labels = self.params.get('labels')
+ hostname = self.params.get('hostname')
+ path = self.params.get('path')
+ wildcard_policy = self.params.get('wildcard_policy')
+ port = self.params.get('port')
+ annotations = self.params.get('annotations')
+
+ if termination_type and self.params.get('tls'):
+ tls_ca_cert = self.params['tls'].get('ca_certificate')
+ tls_cert = self.params['tls'].get('certificate')
+ tls_dest_ca_cert = self.params['tls'].get('destination_ca_certificate')
+ tls_key = self.params['tls'].get('key')
+ tls_insecure_policy = self.params['tls'].get('insecure_policy')
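+            # 'disallow' is the API default (no insecureEdgeTerminationPolicy),
+            # so drop it rather than setting the field explicitly.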
+ if tls_insecure_policy == 'disallow':
+ tls_insecure_policy = None
+ else:
+ tls_ca_cert = tls_cert = tls_dest_ca_cert = tls_key = tls_insecure_policy = None
+
+ route = {
+ 'apiVersion': 'route.openshift.io/v1',
+ 'kind': 'Route',
+ 'metadata': {
+ 'name': route_name,
+ 'namespace': namespace,
+ 'labels': labels,
+ },
+ 'spec': {}
+ }
+
+ if annotations:
+ route['metadata']['annotations'] = annotations
+
+ if state != 'absent':
+ route['spec'] = self.build_route_spec(
+ service_name, namespace,
+ port=port,
+ wildcard_policy=wildcard_policy,
+ hostname=hostname,
+ path=path,
+ termination_type=termination_type,
+ tls_insecure_policy=tls_insecure_policy,
+ tls_ca_cert=tls_ca_cert,
+ tls_cert=tls_cert,
+ tls_key=tls_key,
+ tls_dest_ca_cert=tls_dest_ca_cert,
+ )
+
+ result = perform_action(self.svc, route, self.params)
+ timeout = self.params.get('wait_timeout')
+ sleep = self.params.get('wait_sleep')
+ if custom_wait:
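+            # Poll the Route ourselves until every ingress reports an
+            # 'Admitted' condition (see wait_predicate below) or we time out.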
+ v1_routes = self.find_resource('Route', 'route.openshift.io/v1', fail=True)
+ waiter = Waiter(self.client, v1_routes, wait_predicate)
+ success, result['result'], result['duration'] = waiter.wait(timeout=timeout, sleep=sleep, name=route_name, namespace=namespace)
+
+ self.exit_json(**result)
+
+ def build_route_spec(self, service_name, namespace, port=None, wildcard_policy=None, hostname=None, path=None, termination_type=None,
+ tls_insecure_policy=None, tls_ca_cert=None, tls_cert=None, tls_key=None, tls_dest_ca_cert=None):
+ v1_services = self.find_resource('Service', 'v1', fail=True)
+ try:
+ target_service = v1_services.get(name=service_name, namespace=namespace)
+ except NotFoundError:
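+            # The Service may not exist yet; that is acceptable as long as the
+            # user tells us which port the Route should target.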
+ if not port:
+ self.fail_json(msg="You need to provide the 'port' argument when exposing a non-existent service")
+ target_service = None
+ except DynamicApiError as exc:
+ self.fail_json(msg='Failed to retrieve service to be exposed: {0}'.format(exc.body),
+ error=exc.status, status=exc.status, reason=exc.reason)
+ except Exception as exc:
+ self.fail_json(msg='Failed to retrieve service to be exposed: {0}'.format(to_native(exc)),
+ error='', status='', reason='')
+
+ route_spec = {
+ 'tls': {},
+ 'to': {
+ 'kind': 'Service',
+ 'name': service_name,
+ },
+ 'port': {
+ 'targetPort': self.set_port(target_service, port),
+ },
+ 'wildcardPolicy': wildcard_policy
+ }
+
+        # Conditionally add these fields so we don't overwrite what is automatically added when nothing is provided
+ if termination_type:
+ route_spec['tls'] = dict(termination=termination_type.capitalize())
+ if tls_insecure_policy:
+ if termination_type == 'edge':
+ route_spec['tls']['insecureEdgeTerminationPolicy'] = tls_insecure_policy.capitalize()
+ elif termination_type == 'passthrough':
+ if tls_insecure_policy != 'redirect':
+ self.fail_json("'redirect' is the only supported insecureEdgeTerminationPolicy for passthrough routes")
+ route_spec['tls']['insecureEdgeTerminationPolicy'] = tls_insecure_policy.capitalize()
+ elif termination_type == 'reencrypt':
+ self.fail_json("'tls.insecure_policy' is not supported with reencrypt routes")
+ else:
+ route_spec['tls']['insecureEdgeTerminationPolicy'] = None
+ if tls_ca_cert:
+ if termination_type == 'passthrough':
+ self.fail_json("'tls.ca_certificate' is not supported with passthrough routes")
+ route_spec['tls']['caCertificate'] = tls_ca_cert
+ if tls_cert:
+ if termination_type == 'passthrough':
+ self.fail_json("'tls.certificate' is not supported with passthrough routes")
+ route_spec['tls']['certificate'] = tls_cert
+ if tls_key:
+ if termination_type == 'passthrough':
+ self.fail_json("'tls.key' is not supported with passthrough routes")
+ route_spec['tls']['key'] = tls_key
+ if tls_dest_ca_cert:
+ if termination_type != 'reencrypt':
+ self.fail_json("'destination_certificate' is only valid for reencrypt routes")
+ route_spec['tls']['destinationCACertificate'] = tls_dest_ca_cert
+ else:
+ route_spec['tls'] = None
+ if hostname:
+ route_spec['host'] = hostname
+ if path:
+ route_spec['path'] = path
+
+ return route_spec
+
+ def set_port(self, service, port_arg):
+ if port_arg:
+ return port_arg
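+        # No explicit port was requested: fall back to the first TCP port on
+        # the Service, preferring its name over its numeric targetPort.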
+ for p in service.spec.ports:
+ if p.protocol == 'TCP':
+ if p.name is not None:
+ return p.name
+ return p.targetPort
+ return None
+
+
+def wait_predicate(route):
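+    # The Route counts as ready once every reported ingress has an 'Admitted'
+    # condition with status "True".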
+ if not (route.status and route.status.ingress):
+ return False
+ for ingress in route.status.ingress:
+ match = [x for x in ingress.conditions if x.type == 'Admitted']
+ if not match:
+ return False
+ match = match[0]
+ if match.status != "True":
+ return False
+ return True
+
+
+def main():
+ OpenShiftRoute().run_module()
+
+
+if __name__ == '__main__':
+ main()