summaryrefslogtreecommitdiffstats
path: root/collections-debian-merged/ansible_collections/community/okd/plugins
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-14 20:03:01 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-14 20:03:01 +0000
commita453ac31f3428614cceb99027f8efbdb9258a40b (patch)
treef61f87408f32a8511cbd91799f9cececb53e0374 /collections-debian-merged/ansible_collections/community/okd/plugins
parentInitial commit. (diff)
downloadansible-a453ac31f3428614cceb99027f8efbdb9258a40b.tar.xz
ansible-a453ac31f3428614cceb99027f8efbdb9258a40b.zip
Adding upstream version 2.10.7+merged+base+2.10.8+dfsg.upstream/2.10.7+merged+base+2.10.8+dfsgupstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'collections-debian-merged/ansible_collections/community/okd/plugins')
-rw-r--r--collections-debian-merged/ansible_collections/community/okd/plugins/connection/oc.py173
-rw-r--r--collections-debian-merged/ansible_collections/community/okd/plugins/inventory/openshift.py203
-rw-r--r--collections-debian-merged/ansible_collections/community/okd/plugins/modules/k8s.py442
-rw-r--r--collections-debian-merged/ansible_collections/community/okd/plugins/modules/k8s_auth.py365
-rw-r--r--collections-debian-merged/ansible_collections/community/okd/plugins/modules/openshift_auth.py365
-rw-r--r--collections-debian-merged/ansible_collections/community/okd/plugins/modules/openshift_process.py389
-rw-r--r--collections-debian-merged/ansible_collections/community/okd/plugins/modules/openshift_route.py544
7 files changed, 2481 insertions, 0 deletions
diff --git a/collections-debian-merged/ansible_collections/community/okd/plugins/connection/oc.py b/collections-debian-merged/ansible_collections/community/okd/plugins/connection/oc.py
new file mode 100644
index 00000000..00bc26b8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/okd/plugins/connection/oc.py
@@ -0,0 +1,173 @@
+# Based on the docker connection plugin
+#
+# Connection plugin for configuring kubernetes containers with kubectl
+# (c) 2017, XuXinkun <xuxinkun@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author:
+ - xuxinkun
+
+ connection: oc
+
+ short_description: Execute tasks in pods running on OpenShift.
+
+ description:
+ - Use the oc exec command to run tasks in, or put/fetch files to, pods running on the OpenShift
+ container platform.
+
+
+ requirements:
+ - oc (go binary)
+
+ options:
+ oc_pod:
+ description:
+ - Pod name. Required when the host name does not match pod name.
+ default: ''
+ vars:
+ - name: ansible_oc_pod
+ env:
+ - name: K8S_AUTH_POD
+ oc_container:
+ description:
+ - Container name. Required when a pod contains more than one container.
+ default: ''
+ vars:
+ - name: ansible_oc_container
+ env:
+ - name: K8S_AUTH_CONTAINER
+ oc_namespace:
+ description:
+ - The namespace of the pod
+ default: ''
+ vars:
+ - name: ansible_oc_namespace
+ env:
+ - name: K8S_AUTH_NAMESPACE
+ oc_extra_args:
+ description:
+ - Extra arguments to pass to the oc command line.
+ default: ''
+ vars:
+ - name: ansible_oc_extra_args
+ env:
+ - name: K8S_AUTH_EXTRA_ARGS
+ oc_kubeconfig:
+ description:
+        - Path to an oc config file. Defaults to I(~/.kube/config)
+ default: ''
+ vars:
+ - name: ansible_oc_kubeconfig
+ - name: ansible_oc_config
+ env:
+ - name: K8S_AUTH_KUBECONFIG
+ oc_context:
+ description:
+ - The name of a context found in the K8s config file.
+ default: ''
+ vars:
+ - name: ansible_oc_context
+ env:
+ - name: K8S_AUTH_CONTEXT
+ oc_host:
+ description:
+ - URL for accessing the API.
+ default: ''
+ vars:
+ - name: ansible_oc_host
+ - name: ansible_oc_server
+ env:
+ - name: K8S_AUTH_HOST
+ - name: K8S_AUTH_SERVER
+ oc_token:
+ description:
+ - API authentication bearer token.
+ vars:
+ - name: ansible_oc_token
+ - name: ansible_oc_api_key
+ env:
+ - name: K8S_AUTH_TOKEN
+ - name: K8S_AUTH_API_KEY
+ client_cert:
+ description:
+ - Path to a certificate used to authenticate with the API.
+ default: ''
+ vars:
+ - name: ansible_oc_cert_file
+ - name: ansible_oc_client_cert
+ env:
+ - name: K8S_AUTH_CERT_FILE
+ aliases: [ oc_cert_file ]
+ client_key:
+ description:
+ - Path to a key file used to authenticate with the API.
+ default: ''
+ vars:
+ - name: ansible_oc_key_file
+ - name: ansible_oc_client_key
+ env:
+ - name: K8S_AUTH_KEY_FILE
+ aliases: [ oc_key_file ]
+ ca_cert:
+ description:
+ - Path to a CA certificate used to authenticate with the API.
+ default: ''
+ vars:
+ - name: ansible_oc_ssl_ca_cert
+ - name: ansible_oc_ca_cert
+ env:
+ - name: K8S_AUTH_SSL_CA_CERT
+ aliases: [ oc_ssl_ca_cert ]
+ validate_certs:
+ description:
+ - Whether or not to verify the API server's SSL certificate. Defaults to I(true).
+ default: ''
+ vars:
+ - name: ansible_oc_verify_ssl
+ - name: ansible_oc_validate_certs
+ env:
+ - name: K8S_AUTH_VERIFY_SSL
+ aliases: [ oc_verify_ssl ]
+'''
+
+from ansible_collections.community.kubernetes.plugins.connection.kubectl import Connection as KubectlConnection
+
+
+CONNECTION_TRANSPORT = 'oc'
+
+CONNECTION_OPTIONS = {
+ 'oc_container': '-c',
+ 'oc_namespace': '-n',
+ 'oc_kubeconfig': '--config',
+ 'oc_context': '--context',
+ 'oc_host': '--server',
+ 'client_cert': '--client-certificate',
+ 'client_key': '--client-key',
+ 'ca_cert': '--certificate-authority',
+ 'validate_certs': '--insecure-skip-tls-verify',
+ 'oc_token': '--token'
+}
+
+
+class Connection(KubectlConnection):
+ ''' Local oc based connections '''
+ transport = CONNECTION_TRANSPORT
+ connection_options = CONNECTION_OPTIONS
+ documentation = DOCUMENTATION
diff --git a/collections-debian-merged/ansible_collections/community/okd/plugins/inventory/openshift.py b/collections-debian-merged/ansible_collections/community/okd/plugins/inventory/openshift.py
new file mode 100644
index 00000000..3666c8f2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/okd/plugins/inventory/openshift.py
@@ -0,0 +1,203 @@
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: openshift
+ plugin_type: inventory
+ author:
+ - Chris Houseknecht <@chouseknecht>
+
+ short_description: OpenShift inventory source
+
+ description:
+ - Fetch containers, services and routes for one or more clusters
+ - Groups by cluster name, namespace, namespace_services, namespace_pods, namespace_routes, and labels
+ - Uses openshift.(yml|yaml) YAML configuration file to set parameter values.
+
+ options:
+ plugin:
+ description: token that ensures this is a source file for the 'openshift' plugin.
+ required: True
+ choices: ['openshift']
+ connections:
+ description:
+ - Optional list of cluster connection settings. If no connections are provided, the default
+ I(~/.kube/config) and active context will be used, and objects will be returned for all namespaces
+ the active user is authorized to access.
+ suboptions:
+ name:
+ description:
+ - Optional name to assign to the cluster. If not provided, a name is constructed from the server
+ and port.
+ kubeconfig:
+ description:
+ - Path to an existing Kubernetes config file. If not provided, and no other connection
+ options are provided, the OpenShift client will attempt to load the default
+          configuration file from I(~/.kube/config). Can also be specified via K8S_AUTH_KUBECONFIG
+ environment variable.
+ context:
+ description:
+ - The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment
+ variable.
+ host:
+ description:
+ - Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable.
+ api_key:
+ description:
+ - Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment
+ variable.
+ username:
+ description:
+ - Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME
+ environment variable.
+ password:
+ description:
+ - Provide a password for authenticating with the API. Can also be specified via K8S_AUTH_PASSWORD
+ environment variable.
+ client_cert:
+ description:
+ - Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE
+ environment variable.
+ aliases: [ cert_file ]
+ client_key:
+ description:
+ - Path to a key file used to authenticate with the API. Can also be specified via K8S_AUTH_KEY_FILE
+ environment variable.
+ aliases: [ key_file ]
+ ca_cert:
+ description:
+ - Path to a CA certificate used to authenticate with the API. Can also be specified via
+ K8S_AUTH_SSL_CA_CERT environment variable.
+ aliases: [ ssl_ca_cert ]
+ validate_certs:
+ description:
+ - "Whether or not to verify the API server's SSL certificates. Can also be specified via
+ K8S_AUTH_VERIFY_SSL environment variable."
+ type: bool
+ aliases: [ verify_ssl ]
+ namespaces:
+ description:
+ - List of namespaces. If not specified, will fetch all containers for all namespaces user is authorized
+ to access.
+
+ requirements:
+ - "python >= 2.7"
+ - "openshift >= 0.6"
+ - "PyYAML >= 3.11"
+'''
+
+EXAMPLES = '''
+# File must be named openshift.yaml or openshift.yml
+
+# Authenticate with token, and return all pods and services for all namespaces
+plugin: community.okd.openshift
+connections:
+ - host: https://192.168.64.4:8443
+ api_key: xxxxxxxxxxxxxxxx
+ verify_ssl: false
+
+# Use default config (~/.kube/config) file and active context, and return objects for a specific namespace
+plugin: community.okd.openshift
+connections:
+ - namespaces:
+ - testing
+
+# Use a custom config file, and a specific context.
+plugin: community.okd.openshift
+connections:
+ - kubeconfig: /path/to/config
+ context: 'awx/192-168-64-4:8443/developer'
+'''
+
+from ansible_collections.community.kubernetes.plugins.inventory.k8s import K8sInventoryException, InventoryModule as K8sInventoryModule, format_dynamic_api_exc
+
+try:
+ from openshift.dynamic.exceptions import DynamicApiError
+except ImportError:
+ pass
+
+
+class InventoryModule(K8sInventoryModule):
+ NAME = 'community.okd.openshift'
+
+ connection_plugin = 'community.okd.oc'
+ transport = 'oc'
+
+ def fetch_objects(self, connections):
+ super(InventoryModule, self).fetch_objects(connections)
+
+ if connections:
+ if not isinstance(connections, list):
+ raise K8sInventoryException("Expecting connections to be a list.")
+
+ for connection in connections:
+ client = self.get_api_client(**connection)
+ name = connection.get('name', self.get_default_host_name(client.configuration.host))
+ if connection.get('namespaces'):
+ namespaces = connection['namespaces']
+ else:
+ namespaces = self.get_available_namespaces(client)
+ for namespace in namespaces:
+ self.get_routes_for_namespace(client, name, namespace)
+ else:
+ client = self.get_api_client()
+ name = self.get_default_host_name(client.configuration.host)
+ namespaces = self.get_available_namespaces(client)
+ for namespace in namespaces:
+ self.get_routes_for_namespace(client, name, namespace)
+
+ def get_routes_for_namespace(self, client, name, namespace):
+ v1_route = client.resources.get(api_version='route.openshift.io/v1', kind='Route')
+ try:
+ obj = v1_route.get(namespace=namespace)
+ except DynamicApiError as exc:
+ self.display.debug(exc)
+ raise K8sInventoryException('Error fetching Routes list: %s' % format_dynamic_api_exc(exc))
+
+ namespace_group = 'namespace_{0}'.format(namespace)
+ namespace_routes_group = '{0}_routes'.format(namespace_group)
+
+ self.inventory.add_group(name)
+ self.inventory.add_group(namespace_group)
+ self.inventory.add_child(name, namespace_group)
+ self.inventory.add_group(namespace_routes_group)
+ self.inventory.add_child(namespace_group, namespace_routes_group)
+ for route in obj.items:
+ route_name = route.metadata.name
+ route_annotations = {} if not route.metadata.annotations else dict(route.metadata.annotations)
+
+ self.inventory.add_host(route_name)
+
+ if route.metadata.labels:
+ # create a group for each label_value
+ for key, value in route.metadata.labels:
+ group_name = 'label_{0}_{1}'.format(key, value)
+ self.inventory.add_group(group_name)
+ self.inventory.add_child(group_name, route_name)
+ route_labels = dict(route.metadata.labels)
+ else:
+ route_labels = {}
+
+ self.inventory.add_child(namespace_routes_group, route_name)
+
+ # add hostvars
+ self.inventory.set_variable(route_name, 'labels', route_labels)
+ self.inventory.set_variable(route_name, 'annotations', route_annotations)
+ self.inventory.set_variable(route_name, 'cluster_name', route.metadata.clusterName)
+ self.inventory.set_variable(route_name, 'object_type', 'route')
+ self.inventory.set_variable(route_name, 'self_link', route.metadata.selfLink)
+ self.inventory.set_variable(route_name, 'resource_version', route.metadata.resourceVersion)
+ self.inventory.set_variable(route_name, 'uid', route.metadata.uid)
+
+ if route.spec.host:
+ self.inventory.set_variable(route_name, 'host', route.spec.host)
+
+ if route.spec.path:
+ self.inventory.set_variable(route_name, 'path', route.spec.path)
+
+ if hasattr(route.spec.port, 'targetPort') and route.spec.port.targetPort:
+ self.inventory.set_variable(route_name, 'port', dict(route.spec.port))
diff --git a/collections-debian-merged/ansible_collections/community/okd/plugins/modules/k8s.py b/collections-debian-merged/ansible_collections/community/okd/plugins/modules/k8s.py
new file mode 100644
index 00000000..5aa640e2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/okd/plugins/modules/k8s.py
@@ -0,0 +1,442 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Chris Houseknecht <@chouseknecht>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+
+module: k8s
+
+short_description: Manage OpenShift objects
+
+author:
+ - "Chris Houseknecht (@chouseknecht)"
+ - "Fabian von Feilitzsch (@fabianvf)"
+
+description:
+ - Use the OpenShift Python client to perform CRUD operations on K8s objects.
+ - Pass the object definition from a source file or inline. See examples for reading
+ files and using Jinja templates or vault-encrypted files.
+ - Access to the full range of K8s APIs.
+ - Use the M(k8s_info) module to obtain a list of items about an object of type C(kind)
+ - Authenticate using either a config file, certificates, password or token.
+ - Supports check mode.
+ - Optimized for OKD/OpenShift Kubernetes flavors
+
+extends_documentation_fragment:
+ - community.kubernetes.k8s_state_options
+ - community.kubernetes.k8s_name_options
+ - community.kubernetes.k8s_resource_options
+ - community.kubernetes.k8s_auth_options
+
+notes:
+ - If your OpenShift Python library is not 0.9.0 or newer and you are trying to
+ remove an item from an associative array/dictionary, for example a label or
+ an annotation, you will need to explicitly set the value of the item to be
+ removed to `null`. Simply deleting the entry in the dictionary will not
+ remove it from openshift or kubernetes.
+
+options:
+ merge_type:
+ description:
+ - Whether to override the default patch merge approach with a specific type. By default, the strategic
+ merge will typically be used.
+ - For example, Custom Resource Definitions typically aren't updatable by the usual strategic merge. You may
+ want to use C(merge) if you see "strategic merge patch format is not supported"
+ - See U(https://kubernetes.io/docs/tasks/run-application/update-api-object-kubectl-patch/#use-a-json-merge-patch-to-update-a-deployment)
+ - Requires openshift >= 0.6.2
+ - If more than one merge_type is given, the merge_types will be tried in order
+ - If openshift >= 0.6.2, this defaults to C(['strategic-merge', 'merge']), which is ideal for using the same parameters
+ on resource kinds that combine Custom Resources and built-in resources. For openshift < 0.6.2, the default
+ is simply C(strategic-merge).
+ - mutually exclusive with C(apply)
+ choices:
+ - json
+ - merge
+ - strategic-merge
+ type: list
+ elements: str
+ wait:
+ description:
+ - Whether to wait for certain resource kinds to end up in the desired state. By default the module exits once Kubernetes has
+ received the request
+ - Implemented for C(state=present) for C(Deployment), C(DaemonSet) and C(Pod), and for C(state=absent) for all resource kinds.
+ - For resource kinds without an implementation, C(wait) returns immediately unless C(wait_condition) is set.
+ default: no
+ type: bool
+ wait_sleep:
+ description:
+ - Number of seconds to sleep between checks.
+ default: 5
+ type: int
+ wait_timeout:
+ description:
+ - How long in seconds to wait for the resource to end up in the desired state. Ignored if C(wait) is not set.
+ default: 120
+ type: int
+ wait_condition:
+ description:
+ - Specifies a custom condition on the status to wait for. Ignored if C(wait) is not set or is set to False.
+ suboptions:
+ type:
+ type: str
+ description:
+ - The type of condition to wait for. For example, the C(Pod) resource will set the C(Ready) condition (among others)
+ - Required if you are specifying a C(wait_condition). If left empty, the C(wait_condition) field will be ignored.
+ - The possible types for a condition are specific to each resource type in Kubernetes. See the API documentation of the status field
+ for a given resource to see possible choices.
+ status:
+ type: str
+ description:
+ - The value of the status field in your desired condition.
+ - For example, if a C(Deployment) is paused, the C(Progressing) C(type) will have the C(Unknown) status.
+ choices:
+ - True
+ - False
+ - Unknown
+ default: "True"
+ reason:
+ type: str
+ description:
+ - The value of the reason field in your desired condition
+ - For example, if a C(Deployment) is paused, The C(Progressing) C(type) will have the C(DeploymentPaused) reason.
+ - The possible reasons in a condition are specific to each resource type in Kubernetes. See the API documentation of the status field
+ for a given resource to see possible choices.
+ type: dict
+ validate:
+ description:
+ - how (if at all) to validate the resource definition against the kubernetes schema.
+ Requires the kubernetes-validate python module
+ suboptions:
+ fail_on_error:
+ description: whether to fail on validation errors.
+ type: bool
+ version:
+ description: version of Kubernetes to validate against. defaults to Kubernetes server version
+ type: str
+ strict:
+ description: whether to fail when passing unexpected properties
+ default: True
+ type: bool
+ type: dict
+ append_hash:
+ description:
+ - Whether to append a hash to a resource name for immutability purposes
+ - Applies only to ConfigMap and Secret resources
+ - The parameter will be silently ignored for other resource kinds
+ - The full definition of an object is needed to generate the hash - this means that deleting an object created with append_hash
+ will only work if the same object is passed with state=absent (alternatively, just use state=absent with the name including
+ the generated hash and append_hash=no)
+ type: bool
+ apply:
+ description:
+ - C(apply) compares the desired resource definition with the previously supplied resource definition,
+ ignoring properties that are automatically generated
+ - C(apply) works better with Services than 'force=yes'
+ - mutually exclusive with C(merge_type)
+ type: bool
+
+requirements:
+ - "python >= 2.7"
+ - "openshift >= 0.6"
+ - "PyYAML >= 3.11"
+'''
+
+EXAMPLES = '''
+- name: Create a k8s namespace
+ community.okd.k8s:
+ name: testing
+ api_version: v1
+ kind: Namespace
+ state: present
+
+- name: Create a Service object from an inline definition
+ community.okd.k8s:
+ state: present
+ definition:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: web
+ namespace: testing
+ labels:
+ app: galaxy
+ service: web
+ spec:
+ selector:
+ app: galaxy
+ service: web
+ ports:
+ - protocol: TCP
+ targetPort: 8000
+ name: port-8000-tcp
+ port: 8000
+
+- name: Remove an existing Service object
+ community.okd.k8s:
+ state: absent
+ api_version: v1
+ kind: Service
+ namespace: testing
+ name: web
+
+# Passing the object definition from a file
+
+- name: Create a Deployment by reading the definition from a local file
+ community.okd.k8s:
+ state: present
+ src: /testing/deployment.yml
+
+- name: >-
+ Read definition file from the Ansible controller file system.
+ If the definition file has been encrypted with Ansible Vault it will automatically be decrypted.
+ community.okd.k8s:
+ state: present
+ definition: "{{ lookup('file', '/testing/deployment.yml') | from_yaml }}"
+
+- name: Read definition file from the Ansible controller file system after Jinja templating
+ community.okd.k8s:
+ state: present
+ definition: "{{ lookup('template', '/testing/deployment.yml') | from_yaml }}"
+
+- name: fail on validation errors
+ community.okd.k8s:
+ state: present
+ definition: "{{ lookup('template', '/testing/deployment.yml') | from_yaml }}"
+ validate:
+ fail_on_error: yes
+
+- name: warn on validation errors, check for unexpected properties
+ community.okd.k8s:
+ state: present
+ definition: "{{ lookup('template', '/testing/deployment.yml') | from_yaml }}"
+ validate:
+ fail_on_error: no
+ strict: yes
+'''
+
+RETURN = '''
+result:
+ description:
+ - The created, patched, or otherwise present object. Will be empty in the case of a deletion.
+ returned: success
+ type: complex
+ contains:
+ api_version:
+ description: The versioned schema of this representation of an object.
+ returned: success
+ type: str
+ kind:
+ description: Represents the REST resource this object represents.
+ returned: success
+ type: str
+ metadata:
+ description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
+ returned: success
+ type: complex
+ spec:
+ description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
+ returned: success
+ type: complex
+ status:
+ description: Current status details for the object.
+ returned: success
+ type: complex
+ items:
+ description: Returned only when multiple yaml documents are passed to src or resource_definition
+ returned: when resource_definition or src contains list of objects
+ type: list
+ duration:
+ description: elapsed time of task in seconds
+ returned: when C(wait) is true
+ type: int
+ sample: 48
+'''
+
+import re
+import operator
+import traceback
+from functools import reduce
+
+from ansible.module_utils._text import to_native
+
+try:
+ from ansible_collections.community.kubernetes.plugins.module_utils.raw import KubernetesRawModule
+ HAS_KUBERNETES_COLLECTION = True
+except ImportError as e:
+ HAS_KUBERNETES_COLLECTION = False
+ k8s_collection_import_exception = e
+ K8S_COLLECTION_ERROR = traceback.format_exc()
+ from ansible.module_utils.basic import AnsibleModule as KubernetesRawModule
+
+try:
+ import yaml
+ from openshift.dynamic.exceptions import DynamicApiError, NotFoundError, ForbiddenError
+except ImportError:
+ # Exceptions handled in common
+ pass
+
+TRIGGER_ANNOTATION = 'image.openshift.io/triggers'
+TRIGGER_CONTAINER = re.compile(r"(?P<path>.*)\[((?P<index>[0-9]+)|\?\(@\.name==[\"'\\]*(?P<name>[a-z0-9]([-a-z0-9]*[a-z0-9])?))")
+
+
+class OKDRawModule(KubernetesRawModule):
+
+ def __init__(self):
+ if not HAS_KUBERNETES_COLLECTION:
+ self.fail_json(
+ msg="The community.kubernetes collection must be installed",
+ exception=K8S_COLLECTION_ERROR,
+ error=to_native(k8s_collection_import_exception)
+ )
+ super(OKDRawModule, self).__init__()
+
+ def perform_action(self, resource, definition):
+ state = self.params.get('state', None)
+ name = definition['metadata'].get('name')
+ namespace = definition['metadata'].get('namespace')
+
+ if state != 'absent':
+
+ if resource.kind in ['Project', 'ProjectRequest']:
+ try:
+ resource.get(name, namespace)
+ except (NotFoundError, ForbiddenError):
+ return self.create_project_request(definition)
+ except DynamicApiError as exc:
+ self.fail_json(msg='Failed to retrieve requested object: {0}'.format(exc.body),
+ error=exc.status, status=exc.status, reason=exc.reason)
+
+ try:
+ existing = resource.get(name=name, namespace=namespace).to_dict()
+ except Exception:
+ existing = None
+
+ if existing:
+ if resource.kind == 'DeploymentConfig':
+ if definition.get('spec', {}).get('triggers'):
+ definition = self.resolve_imagestream_triggers(existing, definition)
+ elif existing['metadata'].get('annotations', {}).get(TRIGGER_ANNOTATION):
+ definition = self.resolve_imagestream_trigger_annotation(existing, definition)
+
+ return super(OKDRawModule, self).perform_action(resource, definition)
+
+ @staticmethod
+ def get_index(desired, objects, keys):
+ """ Iterates over keys, returns the first object from objects where the value of the key
+ matches the value in desired
+ """
+ for i, item in enumerate(objects):
+ if item and all([desired.get(key, True) == item.get(key, False) for key in keys]):
+ return i
+
+ def resolve_imagestream_trigger_annotation(self, existing, definition):
+
+ def get_from_fields(d, fields):
+ try:
+ return reduce(operator.getitem, fields, d)
+ except Exception:
+ return None
+
+ def set_from_fields(d, fields, value):
+ get_from_fields(d, fields[:-1])[fields[-1]] = value
+
+ if TRIGGER_ANNOTATION in definition['metadata'].get('annotations', {}).keys():
+ triggers = yaml.safe_load(definition['metadata']['annotations'][TRIGGER_ANNOTATION] or '[]')
+ else:
+ triggers = yaml.safe_load(existing['metadata'].get('annotations', '{}').get(TRIGGER_ANNOTATION, '[]'))
+
+ if not isinstance(triggers, list):
+ return definition
+
+ for trigger in triggers:
+ if trigger.get('fieldPath'):
+ parsed = self.parse_trigger_fieldpath(trigger['fieldPath'])
+ path = parsed.get('path', '').split('.')
+ if path:
+ existing_containers = get_from_fields(existing, path)
+ new_containers = get_from_fields(definition, path)
+ if parsed.get('name'):
+ existing_index = self.get_index({'name': parsed['name']}, existing_containers, ['name'])
+ new_index = self.get_index({'name': parsed['name']}, new_containers, ['name'])
+ elif parsed.get('index') is not None:
+ existing_index = new_index = int(parsed['index'])
+ else:
+ existing_index = new_index = None
+ if existing_index is not None and new_index is not None:
+ if existing_index < len(existing_containers) and new_index < len(new_containers):
+ set_from_fields(definition, path + [new_index, 'image'], get_from_fields(existing, path + [existing_index, 'image']))
+ return definition
+
+ def resolve_imagestream_triggers(self, existing, definition):
+
+ existing_triggers = existing.get('spec', {}).get('triggers')
+ new_triggers = definition['spec']['triggers']
+ existing_containers = existing.get('spec', {}).get('template', {}).get('spec', {}).get('containers', [])
+ new_containers = definition.get('spec', {}).get('template', {}).get('spec', {}).get('containers', [])
+ for i, trigger in enumerate(new_triggers):
+ if trigger.get('type') == 'ImageChange' and trigger.get('imageChangeParams'):
+ names = trigger['imageChangeParams'].get('containerNames', [])
+ for name in names:
+ old_container_index = self.get_index({'name': name}, existing_containers, ['name'])
+ new_container_index = self.get_index({'name': name}, new_containers, ['name'])
+ if old_container_index is not None and new_container_index is not None:
+ image = existing['spec']['template']['spec']['containers'][old_container_index]['image']
+ definition['spec']['template']['spec']['containers'][new_container_index]['image'] = image
+
+ existing_index = self.get_index(trigger['imageChangeParams'], [x.get('imageChangeParams') for x in existing_triggers], ['containerNames'])
+ if existing_index is not None:
+ existing_image = existing_triggers[existing_index].get('imageChangeParams', {}).get('lastTriggeredImage')
+ if existing_image:
+ definition['spec']['triggers'][i]['imageChangeParams']['lastTriggeredImage'] = existing_image
+ existing_from = existing_triggers[existing_index].get('imageChangeParams', {}).get('from', {})
+ new_from = trigger['imageChangeParams'].get('from', {})
+ existing_namespace = existing_from.get('namespace')
+ existing_name = existing_from.get('name', False)
+ new_name = new_from.get('name', True)
+ add_namespace = existing_namespace and 'namespace' not in new_from.keys() and existing_name == new_name
+ if add_namespace:
+ definition['spec']['triggers'][i]['imageChangeParams']['from']['namespace'] = existing_from['namespace']
+
+ return definition
+
+ def parse_trigger_fieldpath(self, expression):
+ parsed = TRIGGER_CONTAINER.search(expression).groupdict()
+ if parsed.get('index'):
+ parsed['index'] = int(parsed['index'])
+ return parsed
+
+ def create_project_request(self, definition):
+ definition['kind'] = 'ProjectRequest'
+ result = {'changed': False, 'result': {}}
+ resource = self.find_resource('ProjectRequest', definition['apiVersion'], fail=True)
+ if not self.check_mode:
+ try:
+ k8s_obj = resource.create(definition)
+ result['result'] = k8s_obj.to_dict()
+ except DynamicApiError as exc:
+ self.fail_json(msg="Failed to create object: {0}".format(exc.body),
+ error=exc.status, status=exc.status, reason=exc.reason)
+ result['changed'] = True
+ result['method'] = 'create'
+ return result
+
+
+def main():
+ OKDRawModule().execute_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/okd/plugins/modules/k8s_auth.py b/collections-debian-merged/ansible_collections/community/okd/plugins/modules/k8s_auth.py
new file mode 100644
index 00000000..571c8413
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/okd/plugins/modules/k8s_auth.py
@@ -0,0 +1,365 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, KubeVirt Team <@kubevirt>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+
+module: openshift_auth
+
+short_description: Authenticate to OpenShift clusters which require an explicit login step
+
+version_added: "0.2.0"
+
+author:
+ - KubeVirt Team (@kubevirt)
+ - Fabian von Feilitzsch (@fabianvf)
+
+description:
+ - This module handles authenticating to OpenShift clusters requiring I(explicit) authentication procedures,
+ meaning ones where a client logs in (obtains an authentication token), performs API operations using said
+ token and then logs out (revokes the token).
+ - On the other hand a popular configuration for username+password authentication is one utilizing HTTP Basic
+ Auth, which does not involve any additional login/logout steps (instead login credentials can be attached
+ to each and every API call performed) and as such is handled directly by the C(k8s) module (and other
+ resource–specific modules) by utilizing the C(host), C(username) and C(password) parameters. Please
+ consult your preferred module's documentation for more details.
+
+options:
+ state:
+ description:
+ - If set to I(present) connect to the API server using the URL specified in C(host) and attempt to log in.
+ - If set to I(absent) attempt to log out by revoking the authentication token specified in C(api_key).
+ default: present
+ choices:
+ - present
+ - absent
+ type: str
+ host:
+ description:
+ - Provide a URL for accessing the API server.
+ required: true
+ type: str
+ username:
+ description:
+ - Provide a username for authenticating with the API server.
+ type: str
+ password:
+ description:
+ - Provide a password for authenticating with the API server.
+ type: str
+ ca_cert:
+ description:
+ - "Path to a CA certificate file used to verify connection to the API server. The full certificate chain
+ must be provided to avoid certificate validation errors."
+ aliases: [ ssl_ca_cert ]
+ type: path
+ validate_certs:
+ description:
+ - "Whether or not to verify the API server's SSL certificates."
+ type: bool
+ default: true
+ aliases: [ verify_ssl ]
+ api_key:
+ description:
+ - When C(state) is set to I(absent), this specifies the token to revoke.
+ type: str
+
+requirements:
+ - python >= 2.7
+ - urllib3
+ - requests
+ - requests-oauthlib
+'''
+
+EXAMPLES = r'''
+- hosts: localhost
+ module_defaults:
+ group/k8s:
+ host: https://k8s.example.com/
+ ca_cert: ca.pem
+ tasks:
+ - block:
+ # It's good practice to store login credentials in a secure vault and not
+ # directly in playbooks.
+ - include_vars: openshift_passwords.yml
+
+ - name: Log in (obtain access token)
+ community.okd.openshift_auth:
+ username: admin
+ password: "{{ openshift_admin_password }}"
+ register: openshift_auth_results
+
+ # Previous task provides the token/api_key, while all other parameters
+ # are taken from module_defaults
+ - name: Get a list of all pods from any namespace
+ community.kubernetes.k8s_info:
+ api_key: "{{ openshift_auth_results.openshift_auth.api_key }}"
+ kind: Pod
+ register: pod_list
+
+ always:
+ - name: If login succeeded, try to log out (revoke access token)
+ when: openshift_auth_results.openshift_auth.api_key is defined
+ community.okd.openshift_auth:
+ state: absent
+ api_key: "{{ openshift_auth_results.openshift_auth.api_key }}"
+'''
+
+# Returned value names need to match k8s modules parameter names, to make it
+# easy to pass returned values of openshift_auth to other k8s modules.
+# Discussion: https://github.com/ansible/ansible/pull/50807#discussion_r248827899
+RETURN = r'''
+openshift_auth:
+ description: OpenShift authentication facts.
+ returned: success
+ type: complex
+ contains:
+ api_key:
+ description: Authentication token.
+ returned: success
+ type: str
+ host:
+ description: URL for accessing the API server.
+ returned: success
+ type: str
+ ca_cert:
+ description: Path to a CA certificate file used to verify connection to the API server.
+ returned: success
+ type: str
+ validate_certs:
+ description: "Whether or not to verify the API server's SSL certificates."
+ returned: success
+ type: bool
+ username:
+ description: Username for authenticating with the API server.
+ returned: success
+ type: str
+k8s_auth:
+ description: Same as returned openshift_auth. Kept only for backwards compatibility.
+ returned: success
+ type: complex
+ contains:
+ api_key:
+ description: Authentication token.
+ returned: success
+ type: str
+ host:
+ description: URL for accessing the API server.
+ returned: success
+ type: str
+ ca_cert:
+ description: Path to a CA certificate file used to verify connection to the API server.
+ returned: success
+ type: str
+ validate_certs:
+ description: "Whether or not to verify the API server's SSL certificates."
+ returned: success
+ type: bool
+ username:
+ description: Username for authenticating with the API server.
+ returned: success
+ type: str
+'''
+
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib_parse import urlparse, parse_qs, urlencode
+
+# 3rd party imports
+try:
+ import requests
+ HAS_REQUESTS = True
+except ImportError:
+ HAS_REQUESTS = False
+
+try:
+ from requests_oauthlib import OAuth2Session
+ HAS_REQUESTS_OAUTH = True
+except ImportError:
+ HAS_REQUESTS_OAUTH = False
+
+try:
+ from urllib3.util import make_headers
+ HAS_URLLIB3 = True
+except ImportError:
+ HAS_URLLIB3 = False
+
+
+K8S_AUTH_ARG_SPEC = {
+ 'state': {
+ 'default': 'present',
+ 'choices': ['present', 'absent'],
+ },
+ 'host': {'required': True},
+ 'username': {},
+ 'password': {'no_log': True},
+ 'ca_cert': {'type': 'path', 'aliases': ['ssl_ca_cert']},
+ 'validate_certs': {
+ 'type': 'bool',
+ 'default': True,
+ 'aliases': ['verify_ssl']
+ },
+ 'api_key': {'no_log': True},
+}
+
+
+class OpenShiftAuthModule(AnsibleModule):
+ def __init__(self):
+ AnsibleModule.__init__(
+ self,
+ argument_spec=K8S_AUTH_ARG_SPEC,
+ required_if=[
+ ('state', 'present', ['username', 'password']),
+ ('state', 'absent', ['api_key']),
+ ]
+ )
+
+ if not HAS_REQUESTS:
+ self.fail("This module requires the python 'requests' package. Try `pip install requests`.")
+
+ if not HAS_REQUESTS_OAUTH:
+ self.fail("This module requires the python 'requests-oauthlib' package. Try `pip install requests-oauthlib`.")
+
+ if not HAS_URLLIB3:
+ self.fail("This module requires the python 'urllib3' package. Try `pip install urllib3`.")
+
+ def execute_module(self):
+ state = self.params.get('state')
+ verify_ssl = self.params.get('validate_certs')
+ ssl_ca_cert = self.params.get('ca_cert')
+
+ self.auth_username = self.params.get('username')
+ self.auth_password = self.params.get('password')
+ self.auth_api_key = self.params.get('api_key')
+ self.con_host = self.params.get('host')
+
+ # python-requests takes either a bool or a path to a ca file as the 'verify' param
+ if verify_ssl and ssl_ca_cert:
+ self.con_verify_ca = ssl_ca_cert # path
+ else:
+ self.con_verify_ca = verify_ssl # bool
+
+ # Get needed info to access authorization APIs
+ self.openshift_discover()
+
+ if state == 'present':
+ new_api_key = self.openshift_login()
+ result = dict(
+ host=self.con_host,
+ validate_certs=verify_ssl,
+ ca_cert=ssl_ca_cert,
+ api_key=new_api_key,
+ username=self.auth_username,
+ )
+ else:
+ self.openshift_logout()
+ result = dict()
+
+ # return k8s_auth as well for backwards compatibility
+ self.exit_json(changed=False, openshift_auth=result, k8s_auth=result)
+
+ def openshift_discover(self):
+ url = '{0}/.well-known/oauth-authorization-server'.format(self.con_host)
+ ret = requests.get(url, verify=self.con_verify_ca)
+
+ if ret.status_code != 200:
+ self.fail_request("Couldn't find OpenShift's OAuth API", method='GET', url=url,
+ reason=ret.reason, status_code=ret.status_code)
+
+ try:
+ oauth_info = ret.json()
+
+ self.openshift_auth_endpoint = oauth_info['authorization_endpoint']
+ self.openshift_token_endpoint = oauth_info['token_endpoint']
+ except Exception:
+ self.fail_json(msg="Something went wrong discovering OpenShift OAuth details.",
+ exception=traceback.format_exc())
+
+ def openshift_login(self):
+ os_oauth = OAuth2Session(client_id='openshift-challenging-client')
+ authorization_url, state = os_oauth.authorization_url(self.openshift_auth_endpoint,
+ state="1", code_challenge_method='S256')
+ auth_headers = make_headers(basic_auth='{0}:{1}'.format(self.auth_username, self.auth_password))
+
+ # Request authorization code using basic auth credentials
+ ret = os_oauth.get(
+ authorization_url,
+ headers={'X-Csrf-Token': state, 'authorization': auth_headers.get('authorization')},
+ verify=self.con_verify_ca,
+ allow_redirects=False
+ )
+
+ if ret.status_code != 302:
+ self.fail_request("Authorization failed.", method='GET', url=authorization_url,
+ reason=ret.reason, status_code=ret.status_code)
+
+ # In here we have `code` and `state`, I think `code` is the important one
+ qwargs = {}
+ for k, v in parse_qs(urlparse(ret.headers['Location']).query).items():
+ qwargs[k] = v[0]
+ qwargs['grant_type'] = 'authorization_code'
+
+ # Using authorization code given to us in the Location header of the previous request, request a token
+ ret = os_oauth.post(
+ self.openshift_token_endpoint,
+ headers={
+ 'Accept': 'application/json',
+ 'Content-Type': 'application/x-www-form-urlencoded',
+ # This is just base64 encoded 'openshift-challenging-client:'
+ 'Authorization': 'Basic b3BlbnNoaWZ0LWNoYWxsZW5naW5nLWNsaWVudDo='
+ },
+ data=urlencode(qwargs),
+ verify=self.con_verify_ca
+ )
+
+ if ret.status_code != 200:
+ self.fail_request("Failed to obtain an authorization token.", method='POST',
+ url=self.openshift_token_endpoint,
+ reason=ret.reason, status_code=ret.status_code)
+
+ return ret.json()['access_token']
+
+ def openshift_logout(self):
+ url = '{0}/apis/oauth.openshift.io/v1/oauthaccesstokens/{1}'.format(self.con_host, self.auth_api_key)
+ headers = {
+ 'Accept': 'application/json',
+ 'Content-Type': 'application/json',
+ 'Authorization': 'Bearer {0}'.format(self.auth_api_key)
+ }
+ json = {
+ "apiVersion": "oauth.openshift.io/v1",
+ "kind": "DeleteOptions"
+ }
+
+ requests.delete(url, headers=headers, json=json, verify=self.con_verify_ca)
+ # Ignore errors, the token will time out eventually anyway
+
+ def fail(self, msg=None):
+ self.fail_json(msg=msg)
+
+ def fail_request(self, msg, **kwargs):
+ req_info = {}
+ for k, v in kwargs.items():
+ req_info['req_' + k] = v
+ self.fail_json(msg=msg, **req_info)
+
+
+def main():
+ module = OpenShiftAuthModule()
+ try:
+ module.execute_module()
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/okd/plugins/modules/openshift_auth.py b/collections-debian-merged/ansible_collections/community/okd/plugins/modules/openshift_auth.py
new file mode 100644
index 00000000..571c8413
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/okd/plugins/modules/openshift_auth.py
@@ -0,0 +1,365 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, KubeVirt Team <@kubevirt>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+
+module: openshift_auth
+
+short_description: Authenticate to OpenShift clusters which require an explicit login step
+
+version_added: "0.2.0"
+
+author:
+ - KubeVirt Team (@kubevirt)
+ - Fabian von Feilitzsch (@fabianvf)
+
+description:
+ - This module handles authenticating to OpenShift clusters requiring I(explicit) authentication procedures,
+ meaning ones where a client logs in (obtains an authentication token), performs API operations using said
+ token and then logs out (revokes the token).
+ - On the other hand a popular configuration for username+password authentication is one utilizing HTTP Basic
+ Auth, which does not involve any additional login/logout steps (instead login credentials can be attached
+ to each and every API call performed) and as such is handled directly by the C(k8s) module (and other
+ resource-specific modules) by utilizing the C(host), C(username) and C(password) parameters. Please
+ consult your preferred module's documentation for more details.
+
+options:
+ state:
+ description:
+ - If set to I(present) connect to the API server using the URL specified in C(host) and attempt to log in.
+ - If set to I(absent) attempt to log out by revoking the authentication token specified in C(api_key).
+ default: present
+ choices:
+ - present
+ - absent
+ type: str
+ host:
+ description:
+ - Provide a URL for accessing the API server.
+ required: true
+ type: str
+ username:
+ description:
+ - Provide a username for authenticating with the API server.
+ type: str
+ password:
+ description:
+ - Provide a password for authenticating with the API server.
+ type: str
+ ca_cert:
+ description:
+ - "Path to a CA certificate file used to verify connection to the API server. The full certificate chain
+ must be provided to avoid certificate validation errors."
+ aliases: [ ssl_ca_cert ]
+ type: path
+ validate_certs:
+ description:
+ - "Whether or not to verify the API server's SSL certificates."
+ type: bool
+ default: true
+ aliases: [ verify_ssl ]
+ api_key:
+ description:
+ - When C(state) is set to I(absent), this specifies the token to revoke.
+ type: str
+
+requirements:
+ - python >= 2.7
+ - urllib3
+ - requests
+ - requests-oauthlib
+'''
+
+EXAMPLES = r'''
+- hosts: localhost
+ module_defaults:
+ group/k8s:
+ host: https://k8s.example.com/
+ ca_cert: ca.pem
+ tasks:
+ - block:
+ # It's good practice to store login credentials in a secure vault and not
+ # directly in playbooks.
+ - include_vars: openshift_passwords.yml
+
+ - name: Log in (obtain access token)
+ community.okd.openshift_auth:
+ username: admin
+ password: "{{ openshift_admin_password }}"
+ register: openshift_auth_results
+
+ # Previous task provides the token/api_key, while all other parameters
+ # are taken from module_defaults
+ - name: Get a list of all pods from any namespace
+ community.kubernetes.k8s_info:
+ api_key: "{{ openshift_auth_results.openshift_auth.api_key }}"
+ kind: Pod
+ register: pod_list
+
+ always:
+ - name: If login succeeded, try to log out (revoke access token)
+ when: openshift_auth_results.openshift_auth.api_key is defined
+ community.okd.openshift_auth:
+ state: absent
+ api_key: "{{ openshift_auth_results.openshift_auth.api_key }}"
+'''
+
+# Returned value names need to match k8s modules parameter names, to make it
+# easy to pass returned values of openshift_auth to other k8s modules.
+# Discussion: https://github.com/ansible/ansible/pull/50807#discussion_r248827899
+RETURN = r'''
+openshift_auth:
+ description: OpenShift authentication facts.
+ returned: success
+ type: complex
+ contains:
+ api_key:
+ description: Authentication token.
+ returned: success
+ type: str
+ host:
+ description: URL for accessing the API server.
+ returned: success
+ type: str
+ ca_cert:
+ description: Path to a CA certificate file used to verify connection to the API server.
+ returned: success
+ type: str
+ validate_certs:
+ description: "Whether or not to verify the API server's SSL certificates."
+ returned: success
+ type: bool
+ username:
+ description: Username for authenticating with the API server.
+ returned: success
+ type: str
+k8s_auth:
+ description: Same as returned openshift_auth. Kept only for backwards compatibility.
+ returned: success
+ type: complex
+ contains:
+ api_key:
+ description: Authentication token.
+ returned: success
+ type: str
+ host:
+ description: URL for accessing the API server.
+ returned: success
+ type: str
+ ca_cert:
+ description: Path to a CA certificate file used to verify connection to the API server.
+ returned: success
+ type: str
+ validate_certs:
+ description: "Whether or not to verify the API server's SSL certificates."
+ returned: success
+ type: bool
+ username:
+ description: Username for authenticating with the API server.
+ returned: success
+ type: str
+'''
+
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib_parse import urlparse, parse_qs, urlencode
+
+# 3rd party imports
+try:
+ import requests
+ HAS_REQUESTS = True
+except ImportError:
+ HAS_REQUESTS = False
+
+try:
+ from requests_oauthlib import OAuth2Session
+ HAS_REQUESTS_OAUTH = True
+except ImportError:
+ HAS_REQUESTS_OAUTH = False
+
+try:
+ from urllib3.util import make_headers
+ HAS_URLLIB3 = True
+except ImportError:
+ HAS_URLLIB3 = False
+
+
+K8S_AUTH_ARG_SPEC = {
+ 'state': {
+ 'default': 'present',
+ 'choices': ['present', 'absent'],
+ },
+ 'host': {'required': True},
+ 'username': {},
+ 'password': {'no_log': True},
+ 'ca_cert': {'type': 'path', 'aliases': ['ssl_ca_cert']},
+ 'validate_certs': {
+ 'type': 'bool',
+ 'default': True,
+ 'aliases': ['verify_ssl']
+ },
+ 'api_key': {'no_log': True},
+}
+
+
+class OpenShiftAuthModule(AnsibleModule):
+ def __init__(self):
+ AnsibleModule.__init__(
+ self,
+ argument_spec=K8S_AUTH_ARG_SPEC,
+ required_if=[
+ ('state', 'present', ['username', 'password']),
+ ('state', 'absent', ['api_key']),
+ ]
+ )
+
+ if not HAS_REQUESTS:
+ self.fail("This module requires the python 'requests' package. Try `pip install requests`.")
+
+ if not HAS_REQUESTS_OAUTH:
+ self.fail("This module requires the python 'requests-oauthlib' package. Try `pip install requests-oauthlib`.")
+
+ if not HAS_URLLIB3:
+ self.fail("This module requires the python 'urllib3' package. Try `pip install urllib3`.")
+
+ def execute_module(self):
+ state = self.params.get('state')
+ verify_ssl = self.params.get('validate_certs')
+ ssl_ca_cert = self.params.get('ca_cert')
+
+ self.auth_username = self.params.get('username')
+ self.auth_password = self.params.get('password')
+ self.auth_api_key = self.params.get('api_key')
+ self.con_host = self.params.get('host')
+
+ # python-requests takes either a bool or a path to a ca file as the 'verify' param
+ if verify_ssl and ssl_ca_cert:
+ self.con_verify_ca = ssl_ca_cert # path
+ else:
+ self.con_verify_ca = verify_ssl # bool
+
+ # Get needed info to access authorization APIs
+ self.openshift_discover()
+
+ if state == 'present':
+ new_api_key = self.openshift_login()
+ result = dict(
+ host=self.con_host,
+ validate_certs=verify_ssl,
+ ca_cert=ssl_ca_cert,
+ api_key=new_api_key,
+ username=self.auth_username,
+ )
+ else:
+ self.openshift_logout()
+ result = dict()
+
+ # return k8s_auth as well for backwards compatibility
+ self.exit_json(changed=False, openshift_auth=result, k8s_auth=result)
+
+ def openshift_discover(self):
+ url = '{0}/.well-known/oauth-authorization-server'.format(self.con_host)
+ ret = requests.get(url, verify=self.con_verify_ca)
+
+ if ret.status_code != 200:
+ self.fail_request("Couldn't find OpenShift's OAuth API", method='GET', url=url,
+ reason=ret.reason, status_code=ret.status_code)
+
+ try:
+ oauth_info = ret.json()
+
+ self.openshift_auth_endpoint = oauth_info['authorization_endpoint']
+ self.openshift_token_endpoint = oauth_info['token_endpoint']
+ except Exception:
+ self.fail_json(msg="Something went wrong discovering OpenShift OAuth details.",
+ exception=traceback.format_exc())
+
+ def openshift_login(self):
+ os_oauth = OAuth2Session(client_id='openshift-challenging-client')
+ authorization_url, state = os_oauth.authorization_url(self.openshift_auth_endpoint,
+ state="1", code_challenge_method='S256')
+ auth_headers = make_headers(basic_auth='{0}:{1}'.format(self.auth_username, self.auth_password))
+
+ # Request authorization code using basic auth credentials
+ ret = os_oauth.get(
+ authorization_url,
+ headers={'X-Csrf-Token': state, 'authorization': auth_headers.get('authorization')},
+ verify=self.con_verify_ca,
+ allow_redirects=False
+ )
+
+ if ret.status_code != 302:
+ self.fail_request("Authorization failed.", method='GET', url=authorization_url,
+ reason=ret.reason, status_code=ret.status_code)
+
+ # In here we have `code` and `state`, I think `code` is the important one
+ qwargs = {}
+ for k, v in parse_qs(urlparse(ret.headers['Location']).query).items():
+ qwargs[k] = v[0]
+ qwargs['grant_type'] = 'authorization_code'
+
+ # Using authorization code given to us in the Location header of the previous request, request a token
+ ret = os_oauth.post(
+ self.openshift_token_endpoint,
+ headers={
+ 'Accept': 'application/json',
+ 'Content-Type': 'application/x-www-form-urlencoded',
+ # This is just base64 encoded 'openshift-challenging-client:'
+ 'Authorization': 'Basic b3BlbnNoaWZ0LWNoYWxsZW5naW5nLWNsaWVudDo='
+ },
+ data=urlencode(qwargs),
+ verify=self.con_verify_ca
+ )
+
+ if ret.status_code != 200:
+ self.fail_request("Failed to obtain an authorization token.", method='POST',
+ url=self.openshift_token_endpoint,
+ reason=ret.reason, status_code=ret.status_code)
+
+ return ret.json()['access_token']
+
+ def openshift_logout(self):
+ url = '{0}/apis/oauth.openshift.io/v1/oauthaccesstokens/{1}'.format(self.con_host, self.auth_api_key)
+ headers = {
+ 'Accept': 'application/json',
+ 'Content-Type': 'application/json',
+ 'Authorization': 'Bearer {0}'.format(self.auth_api_key)
+ }
+ json = {
+ "apiVersion": "oauth.openshift.io/v1",
+ "kind": "DeleteOptions"
+ }
+
+ requests.delete(url, headers=headers, json=json, verify=self.con_verify_ca)
+ # Ignore errors, the token will time out eventually anyway
+
+ def fail(self, msg=None):
+ self.fail_json(msg=msg)
+
+ def fail_request(self, msg, **kwargs):
+ req_info = {}
+ for k, v in kwargs.items():
+ req_info['req_' + k] = v
+ self.fail_json(msg=msg, **req_info)
+
+
+def main():
+ module = OpenShiftAuthModule()
+ try:
+ module.execute_module()
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/okd/plugins/modules/openshift_process.py b/collections-debian-merged/ansible_collections/community/okd/plugins/modules/openshift_process.py
new file mode 100644
index 00000000..feee81b8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/okd/plugins/modules/openshift_process.py
@@ -0,0 +1,389 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020, Red Hat
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: openshift_process
+
+short_description: Process an OpenShift template.openshift.io/v1 Template
+
+version_added: "0.3.0"
+
+author: "Fabian von Feilitzsch (@fabianvf)"
+
+description:
+ - Processes a specified OpenShift template with the provided template.
+ - Templates can be provided inline, from a file, or specified by name and namespace in the cluster.
+ - Analogous to `oc process`.
+ - For CRUD operations on Template resources themselves, see the community.okd.k8s module.
+
+extends_documentation_fragment:
+ - community.kubernetes.k8s_auth_options
+ - community.kubernetes.k8s_wait_options
+ - community.kubernetes.k8s_resource_options
+
+requirements:
+ - "python >= 2.7"
+ - "openshift >= 0.11.0"
+ - "PyYAML >= 3.11"
+
+options:
+ name:
+ description:
+ - The name of the Template to process.
+ - The Template must be present in the cluster.
+ - When provided, I(namespace) is required.
+ - Mutually exclusive with I(resource_definition) or I(src).
+ type: str
+ namespace:
+ description:
+ - The namespace that the template can be found in.
+ type: str
+ namespace_target:
+ description:
+ - The namespace that resources should be created, updated, or deleted in.
+ - Only used when I(state) is present or absent.
+ parameters:
+ description:
+ - 'A set of key: value pairs that will be used to set/override values in the Template.'
+ - Corresponds to the `--param` argument to oc process.
+ type: dict
+ parameter_file:
+ description:
+ - A path to a file containing template parameter values to override/set values in the Template.
+ - Corresponds to the `--param-file` argument to oc process.
+ type: str
+ state:
+ description:
+ - Determines what to do with the rendered Template.
+ - The state I(rendered) will render the Template based on the provided parameters, and return the rendered
+ objects in the I(resources) field. These can then be referenced in future tasks.
+ - The state I(present) will cause the resources in the rendered Template to be created if they do not
+ already exist, and patched if they do.
+ - The state I(absent) will delete the resources in the rendered Template.
+ type: str
+ default: rendered
+ choices: [ absent, present, rendered ]
+'''
+
+EXAMPLES = r'''
+- name: Process a template in the cluster
+ community.okd.openshift_process:
+ name: nginx-example
+ namespace: openshift # only needed if using a template already on the server
+ parameters:
+ NAMESPACE: openshift
+ NAME: test123
+ state: rendered
+ register: result
+
+- name: Create the rendered resources using apply
+ community.okd.k8s:
+ namespace: default
+ definition: '{{ item }}'
+ wait: yes
+ apply: yes
+ loop: '{{ result.resources }}'
+
+- name: Process a template with parameters from an env file and create the resources
+ community.okd.openshift_process:
+ name: nginx-example
+ namespace: openshift
+ namespace_target: default
+ parameter_file: 'files/nginx.env'
+ state: present
+ wait: yes
+
+- name: Process a local template and create the resources
+ community.okd.openshift_process:
+ src: files/example-template.yaml
+ parameter_file: files/example.env
+ namespace_target: default
+ state: present
+
+- name: Process a local template, delete the resources, and wait for them to terminate
+ community.okd.openshift_process:
+ src: files/example-template.yaml
+ parameter_file: files/example.env
+ namespace_target: default
+ state: absent
+ wait: yes
+'''
+
+RETURN = r'''
+result:
+ description:
+ - The created, patched, or otherwise present object. Will be empty in the case of a deletion.
+ returned: on success when state is present or absent
+ type: complex
+ contains:
+ apiVersion:
+ description: The versioned schema of this representation of an object.
+ returned: success
+ type: str
+ kind:
+ description: Represents the REST resource this object represents.
+ returned: success
+ type: str
+ metadata:
+ description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
+ returned: success
+ type: complex
+ contains:
+ name:
+ description: The name of the resource
+ type: str
+ namespace:
+ description: The namespace of the resource
+ type: str
+ spec:
+ description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
+ returned: success
+ type: dict
+ status:
+ description: Current status details for the object.
+ returned: success
+ type: complex
+ contains:
+ conditions:
+ type: complex
+ description: Array of status conditions for the object. Not guaranteed to be present
+ items:
+ description: Returned only when multiple yaml documents are passed to src or resource_definition
+ returned: when resource_definition or src contains list of objects
+ type: list
+ duration:
+ description: elapsed time of task in seconds
+ returned: when C(wait) is true
+ type: int
+ sample: 48
+resources:
+ type: complex
+ description:
+ - The rendered resources defined in the Template
+ returned: on success when state is rendered
+ contains:
+ apiVersion:
+ description: The versioned schema of this representation of an object.
+ returned: success
+ type: str
+ kind:
+ description: Represents the REST resource this object represents.
+ returned: success
+ type: str
+ metadata:
+ description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
+ returned: success
+ type: complex
+ contains:
+ name:
+ description: The name of the resource
+ type: str
+ namespace:
+ description: The namespace of the resource
+ type: str
+ spec:
+ description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
+ returned: success
+ type: dict
+ status:
+ description: Current status details for the object.
+ returned: success
+ type: dict
+ contains:
+ conditions:
+ type: complex
+ description: Array of status conditions for the object. Not guaranteed to be present
+'''
+
+import re
+import os
+import copy
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+try:
+ from ansible_collections.community.kubernetes.plugins.module_utils.common import (
+ K8sAnsibleMixin, AUTH_ARG_SPEC, RESOURCE_ARG_SPEC, WAIT_ARG_SPEC
+ )
+ HAS_KUBERNETES_COLLECTION = True
+except ImportError as e:
+ HAS_KUBERNETES_COLLECTION = False
+ k8s_collection_import_exception = e
+ K8S_COLLECTION_ERROR = traceback.format_exc()
+ K8sAnsibleMixin = object
+ AUTH_ARG_SPEC = RESOURCE_ARG_SPEC = WAIT_ARG_SPEC = {}
+
+try:
+ from openshift.dynamic.exceptions import DynamicApiError, NotFoundError
+except ImportError:
+ pass
+
+DOTENV_PARSER = re.compile(r"(?x)^(\s*(\#.*|\s*|(export\s+)?(?P<key>[A-z_][A-z0-9_.]*)=(?P<value>.+?)?)\s*)[\r\n]*$")
+
+
+class OpenShiftProcess(K8sAnsibleMixin):
+
+ def __init__(self):
+ self.module = AnsibleModule(
+ argument_spec=self.argspec,
+ supports_check_mode=True,
+ )
+ self.fail_json = self.module.fail_json
+ self.exit_json = self.module.exit_json
+
+ if not HAS_KUBERNETES_COLLECTION:
+ self.module.fail_json(
+ msg="The community.kubernetes collection must be installed",
+ exception=K8S_COLLECTION_ERROR,
+ error=to_native(k8s_collection_import_exception)
+ )
+
+ super(OpenShiftProcess, self).__init__()
+
+ self.params = self.module.params
+ self.check_mode = self.module.check_mode
+
+ @property
+ def argspec(self):
+ spec = copy.deepcopy(AUTH_ARG_SPEC)
+ spec.update(copy.deepcopy(WAIT_ARG_SPEC))
+ spec.update(copy.deepcopy(RESOURCE_ARG_SPEC))
+
+ spec['state'] = dict(type='str', default='rendered', choices=['present', 'absent', 'rendered'])
+ spec['namespace'] = dict(type='str')
+ spec['namespace_target'] = dict(type='str')
+ spec['parameters'] = dict(type='dict')
+ spec['name'] = dict(type='str')
+ spec['parameter_file'] = dict(type='str')
+
+ return spec
+
+ def execute_module(self):
+ self.client = self.get_api_client()
+
+ v1_templates = self.find_resource('templates', 'template.openshift.io/v1', fail=True)
+ v1_processed_templates = self.find_resource('processedtemplates', 'template.openshift.io/v1', fail=True)
+
+ name = self.params.get('name')
+ namespace = self.params.get('namespace')
+ namespace_target = self.params.get('namespace_target')
+ definition = self.params.get('resource_definition')
+ src = self.params.get('src')
+
+ state = self.params.get('state')
+
+ parameters = self.params.get('parameters') or {}
+ parameter_file = self.params.get('parameter_file')
+
+ if (name and definition) or (name and src) or (src and definition):
+ self.fail_json("Only one of src, name, or definition may be provided")
+
+ if name and not namespace:
+ self.fail_json("namespace is required when name is set")
+
+ template = None
+
+ if src or definition:
+ self.set_resource_definitions()
+ if len(self.resource_definitions) < 1:
+ self.fail_json('Unable to load a Template resource from src or resource_definition')
+ elif len(self.resource_definitions) > 1:
+ self.fail_json('Multiple Template resources found in src or resource_definition, only one Template may be processed at a time')
+ template = self.resource_definitions[0]
+ template_namespace = template.get('metadata', {}).get('namespace')
+ namespace = template_namespace or namespace or namespace_target or 'default'
+ elif name and namespace:
+ try:
+ template = v1_templates.get(name=name, namespace=namespace).to_dict()
+ except DynamicApiError as exc:
+ self.fail_json(msg="Failed to retrieve Template with name '{0}' in namespace '{1}': {2}".format(name, namespace, exc.body),
+ error=exc.status, status=exc.status, reason=exc.reason)
+ except Exception as exc:
+ self.module.fail_json(msg="Failed to retrieve Template with name '{0}' in namespace '{1}': {2}".format(name, namespace, to_native(exc)),
+ error='', status='', reason='')
+ else:
+ self.fail_json("One of resource_definition, src, or name and namespace must be provided")
+
+ if parameter_file:
+ parameters = self.parse_dotenv_and_merge(parameters, parameter_file)
+
+ for k, v in parameters.items():
+ template = self.update_template_param(template, k, v)
+
+ result = {'changed': False}
+
+ try:
+ response = v1_processed_templates.create(body=template, namespace=namespace).to_dict()
+ except DynamicApiError as exc:
+ self.fail_json(msg="Server failed to render the Template: {0}".format(exc.body),
+ error=exc.status, status=exc.status, reason=exc.reason)
+ except Exception as exc:
+ self.module.fail_json(msg="Server failed to render the Template: {0}".format(to_native(exc)),
+ error='', status='', reason='')
+
+ result['message'] = response['message']
+ result['resources'] = response['objects']
+
+ if state != 'rendered':
+ self.resource_definitions = response['objects']
+ self.kind = self.api_version = self.name = None
+ self.namespace = self.params.get('namespace_target')
+ self.append_hash = False
+ self.apply = False
+ self.params['validate'] = None
+ self.params['merge_type'] = None
+ super(OpenShiftProcess, self).execute_module()
+
+ self.module.exit_json(**result)
+
+ def update_template_param(self, template, k, v):
+ for i, param in enumerate(template['parameters']):
+ if param['name'] == k:
+ template['parameters'][i]['value'] = v
+ return template
+ return template
+
+ def parse_dotenv_and_merge(self, parameters, parameter_file):
+ path = os.path.normpath(parameter_file)
+ if not os.path.exists(path):
+ self.fail(msg="Error accessing {0}. Does the file exist?".format(path))
+ try:
+ with open(path, 'r') as f:
+ multiline = ''
+ for line in f.readlines():
+ line = line.strip()
+ if line.endswith('\\'):
+ multiline += ' '.join(line.rsplit('\\', 1))
+ continue
+ if multiline:
+ line = multiline + line
+ multiline = ''
+ match = DOTENV_PARSER.search(line)
+ if not match:
+ continue
+ match = match.groupdict()
+ if match.get('key'):
+ if match['key'] in parameters:
+ self.fail_json(msg="Duplicate value for '{0}' detected in parameter file".format(match['key']))
+ parameters[match['key']] = match['value']
+ except IOError as exc:
+ self.fail(msg="Error loading parameter file: {0}".format(exc))
+ return parameters
+
+
+def main():
+ OpenShiftProcess().execute_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/okd/plugins/modules/openshift_route.py b/collections-debian-merged/ansible_collections/community/okd/plugins/modules/openshift_route.py
new file mode 100644
index 00000000..77c28b3c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/okd/plugins/modules/openshift_route.py
@@ -0,0 +1,544 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020, Red Hat
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: openshift_route
+
+short_description: Expose a Service as an OpenShift Route.
+
+version_added: "0.3.0"
+
+author: "Fabian von Feilitzsch (@fabianvf)"
+
+description:
+ - Looks up a Service and creates a new Route based on it.
+  - Analogous to C(oc expose) and C(oc create route) for creating Routes, but does not support creating Services.
+ - For creating Services from other resources, see community.kubernetes.k8s_expose
+
+extends_documentation_fragment:
+ - community.kubernetes.k8s_auth_options
+ - community.kubernetes.k8s_wait_options
+ - community.kubernetes.k8s_state_options
+
+requirements:
+ - "python >= 2.7"
+ - "openshift >= 0.11.0"
+ - "PyYAML >= 3.11"
+
+options:
+ service:
+ description:
+ - The name of the service to expose.
+ - Required when I(state) is not absent.
+ type: str
+ aliases: ['svc']
+ namespace:
+ description:
+ - The namespace of the resource being targeted.
+ - The Route will be created in this namespace as well.
+ required: yes
+ type: str
+ labels:
+ description:
+ - Specify the labels to apply to the created Route.
+ - 'A set of key: value pairs.'
+ type: dict
+ name:
+ description:
+ - The desired name of the Route to be created.
+ - Defaults to the value of I(service)
+ type: str
+ hostname:
+ description:
+ - The hostname for the Route.
+ type: str
+ path:
+ description:
+ - The path for the Route
+ type: str
+ wildcard_policy:
+ description:
+ - The wildcard policy for the hostname.
+ - Currently only Subdomain is supported.
+ - If not provided, the default of None will be used.
+ choices:
+ - Subdomain
+ type: str
+ port:
+ description:
+ - Name or number of the port the Route will route traffic to.
+ type: str
+ tls:
+ description:
+ - TLS configuration for the newly created route.
+ - Only used when I(termination) is set.
+ type: dict
+ suboptions:
+ ca_certificate:
+ description:
+ - Path to a CA certificate file on the target host.
+ - Not supported when I(termination) is set to passthrough.
+ type: str
+ certificate:
+ description:
+ - Path to a certificate file on the target host.
+ - Not supported when I(termination) is set to passthrough.
+ type: str
+ destination_ca_certificate:
+ description:
+ - Path to a CA certificate file used for securing the connection.
+ - Only used when I(termination) is set to reencrypt.
+ - Defaults to the Service CA.
+ type: str
+ key:
+ description:
+ - Path to a key file on the target host.
+ - Not supported when I(termination) is set to passthrough.
+ type: str
+ insecure_policy:
+ description:
+ - Sets the InsecureEdgeTerminationPolicy for the Route.
+ - Not supported when I(termination) is set to reencrypt.
+ - When I(termination) is set to passthrough, only redirect is supported.
+ - If not provided, insecure traffic will be disallowed.
+ type: str
+ choices:
+ - allow
+ - redirect
+ - disallow
+ default: disallow
+ termination:
+ description:
+ - The termination type of the Route.
+ - If left empty no termination type will be set, and the route will be insecure.
+ - When set to insecure I(tls) will be ignored.
+ choices:
+ - edge
+ - passthrough
+ - reencrypt
+ - insecure
+ default: insecure
+ type: str
+'''
+
+EXAMPLES = r'''
+- name: Create hello-world deployment
+ community.okd.k8s:
+ definition:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: hello-kubernetes
+ namespace: default
+ spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ app: hello-kubernetes
+ template:
+ metadata:
+ labels:
+ app: hello-kubernetes
+ spec:
+ containers:
+ - name: hello-kubernetes
+ image: paulbouwer/hello-kubernetes:1.8
+ ports:
+ - containerPort: 8080
+
+- name: Create Service for the hello-world deployment
+ community.okd.k8s:
+ definition:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: hello-kubernetes
+ namespace: default
+ spec:
+ ports:
+ - port: 80
+ targetPort: 8080
+ selector:
+ app: hello-kubernetes
+
+- name: Expose the insecure hello-world service externally
+ community.okd.openshift_route:
+ service: hello-kubernetes
+ namespace: default
+ insecure_policy: allow
+ register: route
+'''
+
+RETURN = r'''
+result:
+ description:
+ - The Route object that was created or updated. Will be empty in the case of deletion.
+ returned: success
+ type: complex
+ contains:
+ apiVersion:
+ description: The versioned schema of this representation of an object.
+ returned: success
+ type: str
+ kind:
+ description: Represents the REST resource this object represents.
+ returned: success
+ type: str
+ metadata:
+ description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
+ returned: success
+ type: complex
+ contains:
+ name:
+ description: The name of the created Route
+ type: str
+ namespace:
+          description: The namespace of the created Route
+ type: str
+ spec:
+ description: Specification for the Route
+ returned: success
+ type: complex
+ contains:
+ host:
+ description: Host is an alias/DNS that points to the service.
+ type: str
+ path:
+        description: Path that the router watches for, to route traffic to the service.
+ type: str
+ port:
+ description: Defines a port mapping from a router to an endpoint in the service endpoints.
+ type: complex
+ contains:
+ targetPort:
+ description: The target port on pods selected by the service this route points to.
+ type: str
+ tls:
+ description: Defines config used to secure a route and provide termination.
+ type: complex
+ contains:
+ caCertificate:
+ description: Provides the cert authority certificate contents.
+ type: str
+ certificate:
+ description: Provides certificate contents.
+ type: str
+ destinationCACertificate:
+ description: Provides the contents of the ca certificate of the final destination.
+ type: str
+ insecureEdgeTerminationPolicy:
+ description: Indicates the desired behavior for insecure connections to a route.
+ type: str
+ key:
+ description: Provides key file contents.
+ type: str
+ termination:
+ description: Indicates termination type.
+ type: str
+ to:
+ description: Specifies the target that resolve into endpoints.
+ type: complex
+ contains:
+ kind:
+ description: The kind of target that the route is referring to. Currently, only 'Service' is allowed.
+ type: str
+ name:
+ description: Name of the service/target that is being referred to. e.g. name of the service.
+ type: str
+ weight:
+ description: Specifies the target's relative weight against other target reference objects.
+ type: int
+ wildcardPolicy:
+ description: Wildcard policy if any for the route.
+ type: str
+ status:
+ description: Current status details for the Route
+ returned: success
+ type: complex
+ contains:
+ ingress:
+ description: List of places where the route may be exposed.
+ type: complex
+ contains:
+ conditions:
+ description: Array of status conditions for the Route ingress.
+ type: complex
+ contains:
+ type:
+ description: The type of the condition. Currently only 'Ready'.
+ type: str
+ status:
+ description: The status of the condition. Can be True, False, Unknown.
+ type: str
+ host:
+ description: The host string under which the route is exposed.
+ type: str
+ routerCanonicalHostname:
+ description: The external host name for the router that can be used as a CNAME for the host requested for this route. May not be set.
+ type: str
+ routerName:
+ description: A name chosen by the router to identify itself.
+ type: str
+ wildcardPolicy:
+ description: The wildcard policy that was allowed where this route is exposed.
+ type: str
+duration:
+ description: elapsed time of task in seconds
+ returned: when C(wait) is true
+ type: int
+ sample: 48
+'''
+
+import copy
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+try:
+ from ansible_collections.community.kubernetes.plugins.module_utils.common import (
+ K8sAnsibleMixin, AUTH_ARG_SPEC, WAIT_ARG_SPEC, COMMON_ARG_SPEC
+ )
+ HAS_KUBERNETES_COLLECTION = True
+except ImportError as e:
+ HAS_KUBERNETES_COLLECTION = False
+ k8s_collection_import_exception = e
+ K8S_COLLECTION_ERROR = traceback.format_exc()
+ K8sAnsibleMixin = object
+ AUTH_ARG_SPEC = WAIT_ARG_SPEC = COMMON_ARG_SPEC = {}
+
+try:
+ from openshift.dynamic.exceptions import DynamicApiError, NotFoundError
+except ImportError:
+ pass
+
+
class OpenShiftRoute(K8sAnsibleMixin):
    """Implementation of the openshift_route Ansible module.

    Builds a route.openshift.io/v1 Route definition from the module's
    parameters and delegates the actual create/update/delete work to
    K8sAnsibleMixin.perform_action from community.kubernetes.
    """

    def __init__(self):
        """Parse module arguments, verify community.kubernetes is present, prime mixin state."""
        self.module = AnsibleModule(
            argument_spec=self.argspec,
            supports_check_mode=True,
        )
        # Alias so this class (and mixin helpers) can call self.fail_json directly.
        self.fail_json = self.module.fail_json

        if not HAS_KUBERNETES_COLLECTION:
            self.module.fail_json(
                msg="The community.kubernetes collection must be installed",
                exception=K8S_COLLECTION_ERROR,
                error=to_native(k8s_collection_import_exception)
            )

        super(OpenShiftRoute, self).__init__()

        self.params = self.module.params
        # TODO: should probably make it so that at least some of these aren't required for perform_action to work
        # Or at least explicitly pass them in
        self.append_hash = False
        self.apply = False
        self.check_mode = self.module.check_mode
        self.warnings = []
        self.params['merge_type'] = None

    @property
    def argspec(self):
        """Build the module argument spec: shared auth/wait/common specs plus route options."""
        spec = copy.deepcopy(AUTH_ARG_SPEC)
        spec.update(copy.deepcopy(WAIT_ARG_SPEC))
        spec.update(copy.deepcopy(COMMON_ARG_SPEC))

        spec['service'] = dict(type='str', aliases=['svc'])
        spec['namespace'] = dict(required=True, type='str')
        spec['labels'] = dict(type='dict')
        spec['name'] = dict(type='str')
        spec['hostname'] = dict(type='str')
        spec['path'] = dict(type='str')
        spec['wildcard_policy'] = dict(choices=['Subdomain'], type='str')
        spec['port'] = dict(type='str')
        spec['tls'] = dict(type='dict', options=dict(
            ca_certificate=dict(type='str'),
            certificate=dict(type='str'),
            destination_ca_certificate=dict(type='str'),
            key=dict(type='str'),
            insecure_policy=dict(type='str', choices=['allow', 'redirect', 'disallow'], default='disallow'),
        ))
        spec['termination'] = dict(choices=['edge', 'passthrough', 'reencrypt', 'insecure'], default='insecure')

        return spec

    def execute_module(self):
        """Run the module: build the Route definition, apply it, optionally wait for admission."""
        self.client = self.get_api_client()
        v1_routes = self.find_resource('Route', 'route.openshift.io/v1', fail=True)

        service_name = self.params.get('service')
        namespace = self.params['namespace']
        termination_type = self.params.get('termination')
        # 'insecure' is only a user-facing sentinel: it means "emit no tls block at all".
        if termination_type == 'insecure':
            termination_type = None
        state = self.params.get('state')

        if state != 'absent' and not service_name:
            # NOTE(review): the message is passed positionally to fail_json; this relies
            # on 'msg' being fail_json's first parameter — confirm against the targeted
            # ansible-core version.
            self.fail_json("If 'state' is not 'absent' then 'service' must be provided")

        # We need to do something a little wonky to wait if the user doesn't supply a custom condition
        custom_wait = self.params.get('wait') and not self.params.get('wait_condition') and state != 'absent'
        if custom_wait:
            # Don't use default wait logic in perform_action
            self.params['wait'] = False

        route_name = self.params.get('name') or service_name
        labels = self.params.get('labels')
        hostname = self.params.get('hostname')
        path = self.params.get('path')
        wildcard_policy = self.params.get('wildcard_policy')
        port = self.params.get('port')

        # TLS sub-options only take effect when a termination type was requested.
        if termination_type and self.params.get('tls'):
            tls_ca_cert = self.params['tls'].get('ca_certificate')
            tls_cert = self.params['tls'].get('certificate')
            tls_dest_ca_cert = self.params['tls'].get('destination_ca_certificate')
            tls_key = self.params['tls'].get('key')
            tls_insecure_policy = self.params['tls'].get('insecure_policy')
            # 'disallow' maps to leaving insecureEdgeTerminationPolicy unset.
            if tls_insecure_policy == 'disallow':
                tls_insecure_policy = None
        else:
            tls_ca_cert = tls_cert = tls_dest_ca_cert = tls_key = tls_insecure_policy = None

        route = {
            'apiVersion': 'route.openshift.io/v1',
            'kind': 'Route',
            'metadata': {
                'name': route_name,
                'namespace': namespace,
                'labels': labels,
            },
            'spec': {}
        }

        # For deletion only the metadata matters; otherwise fill in the full spec.
        if state != 'absent':
            route['spec'] = self.build_route_spec(
                service_name, namespace,
                port=port,
                wildcard_policy=wildcard_policy,
                hostname=hostname,
                path=path,
                termination_type=termination_type,
                tls_insecure_policy=tls_insecure_policy,
                tls_ca_cert=tls_ca_cert,
                tls_cert=tls_cert,
                tls_key=tls_key,
                tls_dest_ca_cert=tls_dest_ca_cert,
            )

        result = self.perform_action(v1_routes, route)
        timeout = self.params.get('wait_timeout')
        sleep = self.params.get('wait_sleep')
        if custom_wait:
            # Poll until wait_predicate reports the route as admitted; 'success' is
            # currently unused — only the refreshed resource and duration are kept.
            success, result['result'], result['duration'] = self._wait_for(v1_routes, route_name, namespace, wait_predicate, sleep, timeout, state)

        self.module.exit_json(**result)

    def build_route_spec(self, service_name, namespace, port=None, wildcard_policy=None, hostname=None, path=None, termination_type=None,
                         tls_insecure_policy=None, tls_ca_cert=None, tls_cert=None, tls_key=None, tls_dest_ca_cert=None):
        """Construct the Route 'spec' dict pointing at *service_name* in *namespace*.

        Looks up the target Service so a default target port can be derived when
        none was given, then validates the TLS option combinations against the
        requested termination type, failing the module on invalid combinations.
        """
        v1_services = self.find_resource('Service', 'v1', fail=True)
        try:
            target_service = v1_services.get(name=service_name, namespace=namespace)
        except NotFoundError:
            # Exposing a not-yet-existing service is allowed, but then the port
            # cannot be inferred and must be provided explicitly.
            if not port:
                self.module.fail_json(msg="You need to provide the 'port' argument when exposing a non-existent service")
            target_service = None
        except DynamicApiError as exc:
            self.module.fail_json(msg='Failed to retrieve service to be exposed: {0}'.format(exc.body),
                                  error=exc.status, status=exc.status, reason=exc.reason)
        except Exception as exc:
            self.module.fail_json(msg='Failed to retrieve service to be exposed: {0}'.format(to_native(exc)),
                                  error='', status='', reason='')

        route_spec = {
            'tls': {},
            'to': {
                'kind': 'Service',
                'name': service_name,
            },
            'port': {
                'targetPort': self.set_port(target_service, port),
            },
            'wildcardPolicy': wildcard_policy
        }

        # Want to conditionally add these so we don't overwrite what is automatically added when nothing is provided
        if termination_type:
            route_spec['tls'] = dict(termination=termination_type.capitalize())
            if tls_insecure_policy:
                if termination_type == 'edge':
                    route_spec['tls']['insecureEdgeTerminationPolicy'] = tls_insecure_policy.capitalize()
                elif termination_type == 'passthrough':
                    if tls_insecure_policy != 'redirect':
                        # NOTE(review): message passed positionally to fail_json here and
                        # below — see the note in execute_module.
                        self.module.fail_json("'redirect' is the only supported insecureEdgeTerminationPolicy for passthrough routes")
                    route_spec['tls']['insecureEdgeTerminationPolicy'] = tls_insecure_policy.capitalize()
                elif termination_type == 'reencrypt':
                    self.module.fail_json("'tls.insecure_policy' is not supported with reencrypt routes")
            else:
                route_spec['tls']['insecureEdgeTerminationPolicy'] = None
            if tls_ca_cert:
                if termination_type == 'passthrough':
                    self.module.fail_json("'tls.ca_certificate' is not supported with passthrough routes")
                route_spec['tls']['caCertificate'] = tls_ca_cert
            if tls_cert:
                if termination_type == 'passthrough':
                    self.module.fail_json("'tls.certificate' is not supported with passthrough routes")
                route_spec['tls']['certificate'] = tls_cert
            if tls_key:
                if termination_type == 'passthrough':
                    self.module.fail_json("'tls.key' is not supported with passthrough routes")
                route_spec['tls']['key'] = tls_key
            if tls_dest_ca_cert:
                if termination_type != 'reencrypt':
                    self.module.fail_json("'destination_certificate' is only valid for reencrypt routes")
                route_spec['tls']['destinationCACertificate'] = tls_dest_ca_cert
        else:
            route_spec['tls'] = None
        if hostname:
            route_spec['host'] = hostname
        if path:
            route_spec['path'] = path

        return route_spec

    def set_port(self, service, port_arg):
        """Return the targetPort for the route: *port_arg* if given, else the service's first TCP port.

        Prefers the port's name over its number. *service* may be None only when
        *port_arg* was supplied (enforced in build_route_spec).
        """
        if port_arg:
            return port_arg
        for p in service.spec.ports:
            if p.protocol == 'TCP':
                if p.name is not None:
                    return p.name
                return p.targetPort
        return None
+
+
def wait_predicate(route):
    """Return True once every reported ingress of *route* has an Admitted=True condition."""
    if not route.status:
        return False
    if not route.status.ingress:
        return False
    for entry in route.status.ingress:
        admitted = next((cond for cond in entry.conditions if cond.type == 'Admitted'), None)
        if admitted is None:
            return False
        if admitted.status != "True":
            return False
    return True
+
+
def main():
    """Module entry point: instantiate the implementation class and run it."""
    OpenShiftRoute().execute_module()


if __name__ == '__main__':
    main()