Diffstat (limited to 'ansible_collections/community/okd/molecule/default/tasks')
-rw-r--r--  ansible_collections/community/okd/molecule/default/tasks/openshift_adm_prune_auth_clusterroles.yml | 318
-rw-r--r--  ansible_collections/community/okd/molecule/default/tasks/openshift_adm_prune_auth_roles.yml | 340
-rw-r--r--  ansible_collections/community/okd/molecule/default/tasks/openshift_adm_prune_deployments.yml | 269
-rw-r--r--  ansible_collections/community/okd/molecule/default/tasks/openshift_auth.yml | 111
-rw-r--r--  ansible_collections/community/okd/molecule/default/tasks/openshift_builds.yml | 245
-rw-r--r--  ansible_collections/community/okd/molecule/default/tasks/openshift_import_images.yml | 179
-rw-r--r--  ansible_collections/community/okd/molecule/default/tasks/openshift_process.yml | 183
-rw-r--r--  ansible_collections/community/okd/molecule/default/tasks/openshift_prune_images.yml | 217
-rw-r--r--  ansible_collections/community/okd/molecule/default/tasks/openshift_route.yml | 275
-rw-r--r--  ansible_collections/community/okd/molecule/default/tasks/validate_installed.yml | 122
-rw-r--r--  ansible_collections/community/okd/molecule/default/tasks/validate_not_installed.yml | 25
11 files changed, 2284 insertions, 0 deletions
diff --git a/ansible_collections/community/okd/molecule/default/tasks/openshift_adm_prune_auth_clusterroles.yml b/ansible_collections/community/okd/molecule/default/tasks/openshift_adm_prune_auth_clusterroles.yml
new file mode 100644
index 000000000..4de4894e2
--- /dev/null
+++ b/ansible_collections/community/okd/molecule/default/tasks/openshift_adm_prune_auth_clusterroles.yml
@@ -0,0 +1,318 @@
+- block:
+ - set_fact:
+ test_sa: "clusterrole-sa"
+ test_ns: "clusterrole-ns"
+
+ - name: Ensure namespace
+ kubernetes.core.k8s:
+ kind: Namespace
+ name: "{{ test_ns }}"
+
+ - name: Get cluster information
+ kubernetes.core.k8s_cluster_info:
+ register: cluster_info
+ no_log: true
+
+ - set_fact:
+ cluster_host: "{{ cluster_info['connection']['host'] }}"
+
+ - name: Create Service account
+ kubernetes.core.k8s:
+ definition:
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: "{{ test_sa }}"
+ namespace: "{{ test_ns }}"
+
+ - name: Read Service Account
+ kubernetes.core.k8s_info:
+ kind: ServiceAccount
+ namespace: "{{ test_ns }}"
+ name: "{{ test_sa }}"
+ register: result
+
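+ # NOTE: this relies on the token Secret auto-referenced in the ServiceAccount's
+ # .secrets list; on Kubernetes 1.24+ (OpenShift 4.11+) such secrets are no
+ # longer auto-created, so this lookup may need a manually created token Secret.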
+ - set_fact:
+ secret_token: "{{ result.resources[0]['secrets'][0]['name'] }}"
+
+ - name: Get secret details
+ kubernetes.core.k8s_info:
+ kind: Secret
+ namespace: '{{ test_ns }}'
+ name: '{{ secret_token }}'
+ register: _secret
+ retries: 10
+ delay: 10
+ until:
+ - ("'openshift.io/token-secret.value' in _secret.resources[0]['metadata']['annotations']") or ("'token' in _secret.resources[0]['data']")
+
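+ # Depending on the cluster/secret type, the token is exposed either via the
+ # 'openshift.io/token-secret.value' annotation or the base64-encoded 'token'
+ # key in the Secret data; both forms are handled below.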
+ - set_fact:
+ api_token: "{{ _secret.resources[0]['metadata']['annotations']['openshift.io/token-secret.value'] }}"
+ when: "'openshift.io/token-secret.value' in _secret.resources[0]['metadata']['annotations']"
+
+ - set_fact:
+ api_token: "{{ _secret.resources[0]['data']['token'] | b64decode }}"
+ when: "'token' in _secret.resources[0]['data']"
+
+ - name: list Node should fail (forbidden user)
+ kubernetes.core.k8s_info:
+ api_key: "{{ api_token }}"
+ host: "{{ cluster_host }}"
+ validate_certs: no
+ kind: Node
+ register: error
+ ignore_errors: true
+
+ - assert:
+ that:
+ - '"nodes is forbidden: User" in error.msg'
+
+ - name: list Pod for all namespaces should fail
+ kubernetes.core.k8s_info:
+ api_key: "{{ api_token }}"
+ host: "{{ cluster_host }}"
+ validate_certs: no
+ kind: Pod
+ register: error
+ ignore_errors: true
+
+ - assert:
+ that:
+ - '"pods is forbidden: User" in error.msg'
+
+ - name: list Pod for test namespace should fail
+ kubernetes.core.k8s_info:
+ api_key: "{{ api_token }}"
+ host: "{{ cluster_host }}"
+ validate_certs: no
+ kind: Pod
+ namespace: "{{ test_ns }}"
+ register: error
+ ignore_errors: true
+
+ - assert:
+ that:
+ - '"pods is forbidden: User" in error.msg'
+
+ - set_fact:
+ test_labels:
+ phase: dev
+ cluster_roles:
+ - name: pod-manager
+ resources:
+ - pods
+ verbs:
+ - list
+ api_version_binding: "authorization.openshift.io/v1"
+ - name: node-manager
+ resources:
+ - nodes
+ verbs:
+ - list
+ api_version_binding: "rbac.authorization.k8s.io/v1"
+
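+ # The two ClusterRoles are deliberately bound through different API groups —
+ # the legacy authorization.openshift.io/v1 and the upstream
+ # rbac.authorization.k8s.io/v1 — so pruning is exercised against both.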
+ - name: Create cluster roles
+ kubernetes.core.k8s:
+ definition:
+ kind: ClusterRole
+ apiVersion: "rbac.authorization.k8s.io/v1"
+ metadata:
+ name: "{{ item.name }}"
+ labels: "{{ test_labels }}"
+ rules:
+ - apiGroups: [""]
+ resources: "{{ item.resources }}"
+ verbs: "{{ item.verbs }}"
+ with_items: '{{ cluster_roles }}'
+
+ - name: Create Role Binding (namespaced)
+ kubernetes.core.k8s:
+ definition:
+ kind: RoleBinding
+ apiVersion: "rbac.authorization.k8s.io/v1"
+ metadata:
+ name: "{{ cluster_roles[0].name }}-binding"
+ namespace: "{{ test_ns }}"
+ labels: "{{ test_labels }}"
+ subjects:
+ - kind: ServiceAccount
+ name: "{{ test_sa }}"
+ namespace: "{{ test_ns }}"
+ apiGroup: ""
+ roleRef:
+ kind: ClusterRole
+ name: "{{ cluster_roles[0].name }}"
+ apiGroup: ""
+
+ - name: list Pod for all namespaces should fail
+ kubernetes.core.k8s_info:
+ api_key: "{{ api_token }}"
+ host: "{{ cluster_host }}"
+ validate_certs: no
+ kind: Pod
+ register: error
+ ignore_errors: true
+
+ - assert:
+ that:
+ - '"pods is forbidden: User" in error.msg'
+
+ - name: list Pod for test namespace should succeed
+ kubernetes.core.k8s_info:
+ api_key: "{{ api_token }}"
+ host: "{{ cluster_host }}"
+ validate_certs: no
+ kind: Pod
+ namespace: "{{ test_ns }}"
+ no_log: true
+
+ - name: Create Cluster role Binding
+ kubernetes.core.k8s:
+ definition:
+ kind: ClusterRoleBinding
+ apiVersion: "{{ item.api_version_binding }}"
+ metadata:
+ name: "{{ item.name }}-binding"
+ labels: "{{ test_labels }}"
+ subjects:
+ - kind: ServiceAccount
+ name: "{{ test_sa }}"
+ namespace: "{{ test_ns }}"
+ apiGroup: ""
+ roleRef:
+ kind: ClusterRole
+ name: "{{ item.name }}"
+ apiGroup: ""
+ with_items: "{{ cluster_roles }}"
+
+ - name: list Pod for all namespace should succeed
+ kubernetes.core.k8s_info:
+ api_key: "{{ api_token }}"
+ host: "{{ cluster_host }}"
+ validate_certs: no
+ kind: Pod
+ no_log: true
+
+ - name: list Pod for test namespace should succeed
+ kubernetes.core.k8s_info:
+ api_key: "{{ api_token }}"
+ host: "{{ cluster_host }}"
+ validate_certs: no
+ kind: Pod
+ namespace: "{{ test_ns }}"
+ no_log: true
+
+ - name: list Node using ServiceAccount
+ kubernetes.core.k8s_info:
+ api_key: "{{ api_token }}"
+ host: "{{ cluster_host }}"
+ validate_certs: no
+ kind: Node
+ namespace: "{{ test_ns }}"
+ no_log: true
+
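+ # In check mode the module only reports the ClusterRoleBindings/RoleBindings
+ # that would be pruned; nothing is deleted yet.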
+ - name: Prune clusterroles (check mode)
+ community.okd.openshift_adm_prune_auth:
+ resource: clusterroles
+ label_selectors:
+ - phase=dev
+ register: check
+ check_mode: true
+
+ - name: validate clusterrole binding candidates for prune
+ assert:
+ that:
+ - '"{{ item.name }}-binding" in check.cluster_role_binding'
+ - '"{{ test_ns }}/{{ cluster_roles[0].name }}-binding" in check.role_binding'
+ with_items: "{{ cluster_roles }}"
+
+ - name: Prune Cluster Role for managing Pod
+ community.okd.openshift_adm_prune_auth:
+ resource: clusterroles
+ name: "{{ cluster_roles[0].name }}"
+
+ - name: list Pod for all namespaces should fail
+ kubernetes.core.k8s_info:
+ api_key: "{{ api_token }}"
+ host: "{{ cluster_host }}"
+ validate_certs: no
+ kind: Pod
+ register: error
+ no_log: true
+ ignore_errors: true
+
+ - assert:
+ that:
+ - '"pods is forbidden: User" in error.msg'
+
+ - name: list Pod for test namespace should fail
+ kubernetes.core.k8s_info:
+ api_key: "{{ api_token }}"
+ host: "{{ cluster_host }}"
+ validate_certs: no
+ kind: Pod
+ namespace: "{{ test_ns }}"
+ register: error
+ no_log: true
+ ignore_errors: true
+
+ - assert:
+ that:
+ - '"pods is forbidden: User" in error.msg'
+
+ - name: list Node using ServiceAccount
+ kubernetes.core.k8s_info:
+ api_key: "{{ api_token }}"
+ host: "{{ cluster_host }}"
+ validate_certs: no
+ kind: Node
+ namespace: "{{ test_ns }}"
+ no_log: true
+
+ - name: Prune clusterroles (remaining)
+ community.okd.openshift_adm_prune_auth:
+ resource: clusterroles
+ label_selectors:
+ - phase=dev
+
+ - name: list Node using ServiceAccount should fail
+ kubernetes.core.k8s_info:
+ api_key: "{{ api_token }}"
+ host: "{{ cluster_host }}"
+ validate_certs: no
+ kind: Node
+ namespace: "{{ test_ns }}"
+ register: error
+ ignore_errors: true
+
+ - assert:
+ that:
+ - '"nodes is forbidden: User" in error.msg'
+
+ always:
+ - name: Ensure namespace is deleted
+ kubernetes.core.k8s:
+ state: absent
+ kind: Namespace
+ name: "{{ test_ns }}"
+ ignore_errors: true
+
+ - name: Delete ClusterRoleBinding
+ kubernetes.core.k8s:
+ kind: ClusterRoleBinding
+ api_version: "rbac.authorization.k8s.io/v1"
+ name: "{{ item.name }}-binding"
+ state: absent
+ ignore_errors: true
+ with_items: "{{ cluster_roles }}"
+ when: cluster_roles is defined
+
+ - name: Delete ClusterRole
+ kubernetes.core.k8s:
+ kind: ClusterRole
+ api_version: "rbac.authorization.k8s.io/v1"
+ name: "{{ item.name }}"
+ state: absent
+ ignore_errors: true
+ with_items: "{{ cluster_roles }}"
+ when: cluster_roles is defined
diff --git a/ansible_collections/community/okd/molecule/default/tasks/openshift_adm_prune_auth_roles.yml b/ansible_collections/community/okd/molecule/default/tasks/openshift_adm_prune_auth_roles.yml
new file mode 100644
index 000000000..1724a1938
--- /dev/null
+++ b/ansible_collections/community/okd/molecule/default/tasks/openshift_adm_prune_auth_roles.yml
@@ -0,0 +1,340 @@
+- block:
+ - set_fact:
+ test_ns: "prune-roles"
+ sa_name: "roles-sa"
+ pod_name: "pod-prune"
+ role_definition:
+ - name: pod-list
+ labels:
+ action: list
+ verbs:
+ - list
+ role_binding:
+ api_version: rbac.authorization.k8s.io/v1
+ - name: pod-create
+ labels:
+ action: create
+ verbs:
+ - create
+ - get
+ role_binding:
+ api_version: authorization.openshift.io/v1
+ - name: pod-delete
+ labels:
+ action: delete
+ verbs:
+ - delete
+ role_binding:
+ api_version: rbac.authorization.k8s.io/v1
+
+ - name: Ensure namespace
+ kubernetes.core.k8s:
+ kind: Namespace
+ name: '{{ test_ns }}'
+
+ - name: Get cluster information
+ kubernetes.core.k8s_cluster_info:
+ register: cluster_info
+ no_log: true
+
+ - set_fact:
+ cluster_host: "{{ cluster_info['connection']['host'] }}"
+
+ - name: Create Service account
+ kubernetes.core.k8s:
+ definition:
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: '{{ sa_name }}'
+ namespace: '{{ test_ns }}'
+
+ - name: Read Service Account
+ kubernetes.core.k8s_info:
+ kind: ServiceAccount
+ namespace: '{{ test_ns }}'
+ name: '{{ sa_name }}'
+ register: sa_out
+
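+ # Same ServiceAccount token lookup pattern as in
+ # openshift_adm_prune_auth_clusterroles.yml.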
+ - set_fact:
+ secret_token: "{{ sa_out.resources[0]['secrets'][0]['name'] }}"
+
+ - name: Get secret details
+ kubernetes.core.k8s_info:
+ kind: Secret
+ namespace: '{{ test_ns }}'
+ name: '{{ secret_token }}'
+ register: r_secret
+ retries: 10
+ delay: 10
+ until:
+ - ("'openshift.io/token-secret.value' in r_secret.resources[0]['metadata']['annotations']") or ("'token' in r_secret.resources[0]['data']")
+
+ - set_fact:
+ api_token: "{{ r_secret.resources[0]['metadata']['annotations']['openshift.io/token-secret.value'] }}"
+ when: "'openshift.io/token-secret.value' in r_secret.resources[0]['metadata']['annotations']"
+
+ - set_fact:
+ api_token: "{{ r_secret.resources[0]['data']['token'] | b64decode }}"
+ when: "'token' in r_secret.resources[0]['data']"
+
+ - name: list resources using service account
+ kubernetes.core.k8s_info:
+ api_key: '{{ api_token }}'
+ host: '{{ cluster_host }}'
+ validate_certs: no
+ kind: Pod
+ namespace: '{{ test_ns }}'
+ register: error
+ ignore_errors: true
+
+ - assert:
+ that:
+ - '"pods is forbidden: User" in error.msg'
+
+ - name: Create Roles to manage Pods in namespace "{{ test_ns }}"
+ kubernetes.core.k8s:
+ definition:
+ kind: Role
+ apiVersion: rbac.authorization.k8s.io/v1
+ metadata:
+ namespace: "{{ test_ns }}"
+ name: "{{ item.name }}"
+ labels: "{{ item.labels }}"
+ rules:
+ - apiGroups: [""]
+ resources: ["pods"]
+ verbs: "{{ item.verbs }}"
+ with_items: "{{ role_definition }}"
+
+ - name: Create Role Binding
+ kubernetes.core.k8s:
+ definition:
+ kind: RoleBinding
+ apiVersion: "{{ item.role_binding.api_version }}"
+ metadata:
+ name: "{{ item.name }}-bind"
+ namespace: "{{ test_ns }}"
+ subjects:
+ - kind: ServiceAccount
+ name: "{{ sa_name }}"
+ namespace: "{{ test_ns }}"
+ apiGroup: ""
+ roleRef:
+ kind: Role
+ name: "{{ item.name }}"
+ namespace: "{{ test_ns }}"
+ apiGroup: ""
+ with_items: "{{ role_definition }}"
+
+ - name: Create Pod should succeed
+ kubernetes.core.k8s:
+ api_key: "{{ api_token }}"
+ host: "{{ cluster_host }}"
+ validate_certs: no
+ namespace: "{{ test_ns }}"
+ definition:
+ kind: Pod
+ metadata:
+ name: "{{ pod_name }}"
+ spec:
+ containers:
+ - name: python
+ image: python:3.7-alpine
+ command:
+ - /bin/sh
+ - -c
+ - while true; do echo $(date); sleep 15; done
+ imagePullPolicy: IfNotPresent
+ register: result
+
+ - name: assert pod creation succeeded
+ assert:
+ that:
+ - result is successful
+
+ - name: List Pod
+ kubernetes.core.k8s_info:
+ api_key: "{{ api_token }}"
+ host: "{{ cluster_host }}"
+ validate_certs: no
+ namespace: "{{ test_ns }}"
+ kind: Pod
+ register: result
+
+ - name: assert user is now authorized to list pods
+ assert:
+ that:
+ - result is successful
+
+ - name: Prune auth roles (check mode)
+ community.okd.openshift_adm_prune_auth:
+ resource: roles
+ namespace: "{{ test_ns }}"
+ register: check
+ check_mode: true
+
+ - name: validate that the role bindings are candidates for prune
+ assert:
+ that: '"{{ test_ns }}/{{ item.name }}-bind" in check.role_binding'
+ with_items: "{{ role_definition }}"
+
+ - name: Prune resource using label_selectors option
+ community.okd.openshift_adm_prune_auth:
+ resource: roles
+ namespace: "{{ test_ns }}"
+ label_selectors:
+ - action=delete
+ register: prune
+
+ - name: assert that role binding 'delete' was pruned
+ assert:
+ that:
+ - prune is changed
+ - '"{{ test_ns }}/{{ role_definition[2].name }}-bind" in check.role_binding'
+
+ - name: Try to delete Pod (should fail, role was pruned)
+ kubernetes.core.k8s:
+ api_key: "{{ api_token }}"
+ host: "{{ cluster_host }}"
+ validate_certs: no
+ state: absent
+ namespace: "{{ test_ns }}"
+ kind: Pod
+ name: "{{ pod_name }}"
+ register: result
+ ignore_errors: true
+
+ - name: assert pod deletion failed due to forbidden user
+ assert:
+ that:
+ - '"forbidden: User" in error.msg'
+
+ - name: List Pod
+ kubernetes.core.k8s_info:
+ api_key: "{{ api_token }}"
+ host: "{{ cluster_host }}"
+ validate_certs: no
+ namespace: "{{ test_ns }}"
+ kind: Pod
+ register: result
+
+ - name: assert user is still able to list pods
+ assert:
+ that:
+ - result is successful
+
+ - name: Create Pod should succeed
+ kubernetes.core.k8s:
+ api_key: "{{ api_token }}"
+ host: "{{ cluster_host }}"
+ validate_certs: no
+ namespace: "{{ test_ns }}"
+ definition:
+ kind: Pod
+ metadata:
+ name: "{{ pod_name }}-1"
+ spec:
+ containers:
+ - name: python
+ image: python:3.7-alpine
+ command:
+ - /bin/sh
+ - -c
+ - while true; do echo $(date); sleep 15; done
+ imagePullPolicy: IfNotPresent
+ register: result
+
+ - name: assert user is still authorized to create pods
+ assert:
+ that:
+ - result is successful
+
+ - name: Prune role using name
+ community.okd.openshift_adm_prune_auth:
+ resource: roles
+ namespace: "{{ test_ns }}"
+ name: "{{ role_definition[1].name }}"
+ register: prune
+
+ - name: assert that role binding 'create' was pruned
+ assert:
+ that:
+ - prune is changed
+ - '"{{ test_ns }}/{{ role_definition[1].name }}-bind" in check.role_binding'
+
+ - name: Create Pod (should fail)
+ kubernetes.core.k8s:
+ api_key: "{{ api_token }}"
+ host: "{{ cluster_host }}"
+ validate_certs: no
+ namespace: "{{ test_ns }}"
+ definition:
+ kind: Pod
+ metadata:
+ name: "{{ pod_name }}-2"
+ spec:
+ containers:
+ - name: python
+ image: python:3.7-alpine
+ command:
+ - /bin/sh
+ - -c
+ - while true; do echo $(date); sleep 15; done
+ imagePullPolicy: IfNotPresent
+ register: result
+ ignore_errors: true
+
+ - name: assert user is not authorized to create pods anymore
+ assert:
+ that:
+ - '"forbidden: User" in error.msg'
+
+ - name: List Pod
+ kubernetes.core.k8s_info:
+ api_key: "{{ api_token }}"
+ host: "{{ cluster_host }}"
+ validate_certs: no
+ namespace: "{{ test_ns }}"
+ kind: Pod
+ register: result
+
+ - name: assert user is still able to list pods
+ assert:
+ that:
+ - result is successful
+
+ - name: Prune all roles for namespace (neither name nor label_selectors specified)
+ community.okd.openshift_adm_prune_auth:
+ resource: roles
+ namespace: "{{ test_ns }}"
+ register: prune
+
+ - name: assert that role binding 'list' was pruned
+ assert:
+ that:
+ - prune is changed
+ - '"{{ test_ns }}/{{ role_definition[0].name }}-bind" in check.role_binding'
+
+ - name: List Pod
+ kubernetes.core.k8s_info:
+ api_key: "{{ api_token }}"
+ host: "{{ cluster_host }}"
+ validate_certs: no
+ namespace: "{{ test_ns }}"
+ kind: Pod
+ register: result
+ ignore_errors: true
+
+ - name: assert user is not authorized to list pods anymore
+ assert:
+ that:
+ - '"forbidden: User" in error.msg'
+
+ always:
+ - name: Ensure namespace is deleted
+ kubernetes.core.k8s:
+ state: absent
+ kind: Namespace
+ name: "{{ test_ns }}"
+ ignore_errors: true
diff --git a/ansible_collections/community/okd/molecule/default/tasks/openshift_adm_prune_deployments.yml b/ansible_collections/community/okd/molecule/default/tasks/openshift_adm_prune_deployments.yml
new file mode 100644
index 000000000..baa024188
--- /dev/null
+++ b/ansible_collections/community/okd/molecule/default/tasks/openshift_adm_prune_deployments.yml
@@ -0,0 +1,269 @@
+- name: Prune deployments
+ block:
+ - set_fact:
+ dc_name: "hello"
+ deployment_ns: "prune-deployments"
+ deployment_ns_2: "prune-deployments-2"
+
+
+ - name: Ensure namespace
+ community.okd.k8s:
+ kind: Namespace
+ name: '{{ deployment_ns }}'
+
+ - name: Create deployment config
+ community.okd.k8s:
+ namespace: '{{ deployment_ns }}'
+ definition:
+ kind: DeploymentConfig
+ apiVersion: apps.openshift.io/v1
+ metadata:
+ name: '{{ dc_name }}'
+ spec:
+ replicas: 1
+ selector:
+ name: '{{ dc_name }}'
+ template:
+ metadata:
+ labels:
+ name: '{{ dc_name }}'
+ spec:
+ containers:
+ - name: hello-openshift
+ imagePullPolicy: IfNotPresent
+ image: python:3.7-alpine
+ command: [ "/bin/sh", "-c", "while true;do date;sleep 2s; done"]
+ wait: yes
+
+ - name: prune deployments (no candidate DeploymentConfig)
+ community.okd.openshift_adm_prune_deployments:
+ namespace: "{{ deployment_ns }}"
+ register: test_prune
+
+ - assert:
+ that:
+ - test_prune is not changed
+ - test_prune.replication_controllers | length == 0
+
+ - name: Update DeploymentConfig - set replicas to 0
+ community.okd.k8s:
+ namespace: "{{ deployment_ns }}"
+ definition:
+ kind: DeploymentConfig
+ apiVersion: "apps.openshift.io/v1"
+ metadata:
+ name: "{{ dc_name }}"
+ spec:
+ replicas: 0
+ selector:
+ name: "{{ dc_name }}"
+ template:
+ metadata:
+ labels:
+ name: "{{ dc_name }}"
+ spec:
+ containers:
+ - name: hello-openshift
+ imagePullPolicy: IfNotPresent
+ image: python:3.7-alpine
+ command: [ "/bin/sh", "-c", "while true;do date;sleep 2s; done"]
+ wait: yes
+
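+ # Scaling the DC down to 0 lets the deployment finish; once the RC's
+ # openshift.io/deployment.phase annotation reaches Complete (or Failed)
+ # it becomes a candidate for pruning.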
+ - name: Wait for ReplicationController candidate for pruning
+ kubernetes.core.k8s_info:
+ kind: ReplicationController
+ namespace: "{{ deployment_ns }}"
+ register: result
+ retries: 10
+ delay: 30
+ until:
+ - result.resources.0.metadata.annotations["openshift.io/deployment.phase"] in ("Failed", "Complete")
+
+ - name: Prune deployments (check_mode) - should report 1 ReplicationController
+ community.okd.openshift_adm_prune_deployments:
+ namespace: "{{ deployment_ns }}"
+ check_mode: yes
+ register: test_prune
+
+ - name: Read ReplicationController
+ kubernetes.core.k8s_info:
+ kind: ReplicationController
+ namespace: "{{ deployment_ns }}"
+ register: replications
+
+ - name: Assert that the ReplicationController was not deleted
+ assert:
+ that:
+ - replications.resources | length == 1
+ - 'replications.resources.0.metadata.name is match("{{ dc_name }}-*")'
+
+ - name: Assert that a candidate ReplicationController was found for pruning
+ assert:
+ that:
+ - test_prune is changed
+ - test_prune.replication_controllers | length == 1
+ - test_prune.replication_controllers.0.metadata.name == replications.resources.0.metadata.name
+ - test_prune.replication_controllers.0.metadata.namespace == replications.resources.0.metadata.namespace
+
+ - name: Prune deployments - keep younger than 45min (check_mode)
+ community.okd.openshift_adm_prune_deployments:
+ keep_younger_than: 45
+ namespace: "{{ deployment_ns }}"
+ check_mode: true
+ register: keep_younger
+
+ - name: assert no candidate was found
+ assert:
+ that:
+ - keep_younger is not changed
+ - keep_younger.replication_controllers == []
+
+ - name: Ensure second namespace is created
+ community.okd.k8s:
+ kind: Namespace
+ name: '{{ deployment_ns_2 }}'
+
+ - name: Create deployment config from 2nd namespace
+ community.okd.k8s:
+ namespace: '{{ deployment_ns_2 }}'
+ definition:
+ kind: DeploymentConfig
+ apiVersion: apps.openshift.io/v1
+ metadata:
+ name: '{{ dc_name }}2'
+ spec:
+ replicas: 1
+ selector:
+ name: '{{ dc_name }}2'
+ template:
+ metadata:
+ labels:
+ name: '{{ dc_name }}2'
+ spec:
+ containers:
+ - name: hello-openshift
+ imagePullPolicy: IfNotPresent
+ image: python:3.7-alpine
+ command: [ "/bin/sh", "-c", "while true;do date;sleep 2s; done"]
+ wait: yes
+
+ - name: Stop deployment config - replicas = 0
+ community.okd.k8s:
+ namespace: '{{ deployment_ns_2 }}'
+ definition:
+ kind: DeploymentConfig
+ apiVersion: apps.openshift.io/v1
+ metadata:
+ name: '{{ dc_name }}2'
+ spec:
+ replicas: 0
+ selector:
+ name: '{{ dc_name }}2'
+ template:
+ metadata:
+ labels:
+ name: '{{ dc_name }}2'
+ spec:
+ containers:
+ - name: hello-openshift
+ imagePullPolicy: IfNotPresent
+ image: python:3.7-alpine
+ command: [ "/bin/sh", "-c", "while true;do date;sleep 2s; done"]
+ wait: yes
+
+ - name: Wait for ReplicationController candidate for pruning
+ kubernetes.core.k8s_info:
+ kind: ReplicationController
+ namespace: "{{ deployment_ns_2 }}"
+ register: result
+ retries: 10
+ delay: 30
+ until:
+ - result.resources.0.metadata.annotations["openshift.io/deployment.phase"] in ("Failed", "Complete")
+
+ # Pruning one namespace should not have any effect on other namespaces
+ - name: Prune deployments from 2nd namespace
+ community.okd.openshift_adm_prune_deployments:
+ namespace: "{{ deployment_ns_2 }}"
+ check_mode: yes
+ register: test_prune
+
+ - name: Assert that a candidate ReplicationController was found for pruning
+ assert:
+ that:
+ - test_prune is changed
+ - test_prune.replication_controllers | length == 1
+ - "test_prune.replication_controllers.0.metadata.namespace == deployment_ns_2"
+
+ # Prune without namespace option
+ - name: Prune from all namespaces should find more deployments
+ community.okd.openshift_adm_prune_deployments:
+ check_mode: yes
+ register: no_namespace_prune
+
+ - name: Assert that multiple ReplicationControllers were found for pruning
+ assert:
+ that:
+ - no_namespace_prune is changed
+ - no_namespace_prune.replication_controllers | length == 2
+
+ # Execute Prune from 2nd namespace
+ - name: Read ReplicationController before Prune operation
+ kubernetes.core.k8s_info:
+ kind: ReplicationController
+ namespace: "{{ deployment_ns_2 }}"
+ register: replications
+
+ - assert:
+ that:
+ - replications.resources | length == 1
+
+ - name: Prune DeploymentConfig from 2nd namespace
+ community.okd.openshift_adm_prune_deployments:
+ namespace: "{{ deployment_ns_2 }}"
+ register: _prune
+
+ - name: Assert DeploymentConfig was deleted
+ assert:
+ that:
+ - _prune is changed
+ - _prune.replication_controllers | length == 1
+ - _prune.replication_controllers.0.details.name == replications.resources.0.metadata.name
+
+ # Execute Prune without namespace option
+ - name: Read ReplicationController before Prune operation
+ kubernetes.core.k8s_info:
+ kind: ReplicationController
+ namespace: "{{ deployment_ns }}"
+ register: replications
+
+ - assert:
+ that:
+ - replications.resources | length == 1
+
+ - name: Prune from all namespaces should delete remaining deployments
+ community.okd.openshift_adm_prune_deployments:
+ register: _prune
+
+ - name: Assert that ReplicationControllers were pruned
+ assert:
+ that:
+ - _prune is changed
+ - _prune.replication_controllers | length > 0
+
+ always:
+ - name: Delete 1st namespace
+ community.okd.k8s:
+ state: absent
+ kind: Namespace
+ name: "{{ deployment_ns }}"
+ ignore_errors: yes
+ when: deployment_ns is defined
+
+ - name: Delete 2nd namespace
+ community.okd.k8s:
+ state: absent
+ kind: Namespace
+ name: "{{ deployment_ns_2 }}"
+ ignore_errors: yes
+ when: deployment_ns_2 is defined
\ No newline at end of file
diff --git a/ansible_collections/community/okd/molecule/default/tasks/openshift_auth.yml b/ansible_collections/community/okd/molecule/default/tasks/openshift_auth.yml
new file mode 100644
index 000000000..aeeee4c72
--- /dev/null
+++ b/ansible_collections/community/okd/molecule/default/tasks/openshift_auth.yml
@@ -0,0 +1,111 @@
+---
+- block:
+ - set_fact:
+ admin_user: test
+ admin_pass: testing123
+
+ - name: Retrieve cluster info
+ kubernetes.core.k8s_cluster_info:
+ register: k8s_cluster
+
+ - name: set openshift host value
+ set_fact:
+ openshift_host: "{{ k8s_cluster.connection.host }}"
+
+ - name: Log in (obtain access token)
+ community.okd.openshift_auth:
+ username: "{{ admin_user }}"
+ password: "{{ admin_pass }}"
+ host: '{{ openshift_host }}'
+ verify_ssl: false
+ register: openshift_auth_results
+
+ - set_fact:
+ auth_api_key: "{{ openshift_auth_results.openshift_auth.api_key }}"
+
+ - name: "Get the {{ admin_user }} User"
+ kubernetes.core.k8s_info:
+ api_key: "{{ auth_api_key }}"
+ host: '{{ openshift_host }}'
+ verify_ssl: false
+ kind: User
+ api_version: user.openshift.io/v1
+ name: "{{ admin_user }}"
+ register: user_result
+
+ - name: assert that the user was found
+ assert:
+ that: (user_result.resources | length) == 1
+
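+ # UserOAuthAccessToken (oauth.openshift.io/v1) lists per-user OAuth tokens on
+ # recent OpenShift releases; the revocation block below only runs when tokens
+ # are actually found.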
+ - name: list available tokens
+ kubernetes.core.k8s_info:
+ kind: UserOAuthAccessToken
+ version: oauth.openshift.io/v1
+ register: tokens
+
+ - debug: var=tokens
+
+ - set_fact:
+ token_names: "{{ tokens.resources | map(attribute='metadata.name') | list }}"
+
+ - block:
+ - debug: var=token_names
+
+ - name: Revoke access token
+ community.okd.openshift_auth:
+ state: absent
+ api_key: "{{ auth_api_key }}"
+ host: '{{ openshift_host }}'
+ verify_ssl: false
+ register: _revoke
+
+ - name: Ensure that token has been revoked
+ assert:
+ that:
+ - _revoke is changed
+
+ - name: "Get the {{ admin_user }} User (after token deletion)"
+ kubernetes.core.k8s_info:
+ api_key: "{{ auth_api_key }}"
+ host: '{{ openshift_host }}'
+ verify_ssl: false
+ kind: User
+ api_version: user.openshift.io/v1
+ name: "{{ admin_user }}"
+ ignore_errors: true
+ retries: 50
+ until: user_result is failed
+ delay: 20
+ register: user_result
+
+ - name: Ensure that task has failed due to revoked token
+ assert:
+ that:
+ - user_result is failed
+
+ - name: Revoke access token once again (should fail)
+ community.okd.openshift_auth:
+ state: absent
+ api_key: "{{ auth_api_key }}"
+ host: '{{ openshift_host }}'
+ verify_ssl: false
+ register: _revoke
+ ignore_errors: true
+
+ - name: Ensure that nothing changed
+ assert:
+ that:
+ - _revoke is failed
+ - _revoke.msg.startswith("Couldn't delete user oauth access token")
+
+ when: token_names | length > 0
+
+ always:
+ - name: If login succeeded, try to log out (revoke access token)
+ when: auth_api_key is defined
+ community.okd.openshift_auth:
+ state: absent
+ api_key: "{{ auth_api_key }}"
+ host: '{{ openshift_host }}'
+ verify_ssl: false
+ ignore_errors: true
diff --git a/ansible_collections/community/okd/molecule/default/tasks/openshift_builds.yml b/ansible_collections/community/okd/molecule/default/tasks/openshift_builds.yml
new file mode 100644
index 000000000..b564f8bcd
--- /dev/null
+++ b/ansible_collections/community/okd/molecule/default/tasks/openshift_builds.yml
@@ -0,0 +1,245 @@
+- block:
+ - set_fact:
+ build_ns: "builds"
+ build_config: "start-build"
+ is_name: "ruby"
+ prune_build: "prune-build"
+
+ - name: Ensure namespace
+ kubernetes.core.k8s:
+ kind: Namespace
+ name: "{{ build_ns }}"
+
+ - name: Create ImageStream
+ community.okd.k8s:
+ namespace: "{{ build_ns }}"
+ definition:
+ apiVersion: image.openshift.io/v1
+ kind: ImageStream
+ metadata:
+ name: "{{ is_name }}"
+ spec:
+ lookupPolicy:
+ local: false
+ tags: []
+
+ - name: Create build configuration
+ community.okd.k8s:
+ namespace: "{{ build_ns }}"
+ definition:
+ kind: BuildConfig
+ apiVersion: build.openshift.io/v1
+ metadata:
+ name: "{{ build_config }}"
+ spec:
+ source:
+ dockerfile: |
+ FROM openshift/ruby-22-centos7
+ RUN sleep 60s
+ USER ansible
+ strategy:
+ type: Docker
+ output:
+ to:
+ kind: "ImageStreamTag"
+ name: "{{ is_name }}:latest"
+
+ - name: Start Build from Build configuration
+ community.okd.openshift_build:
+ namespace: "{{ build_ns }}"
+ build_config_name: "{{ build_config }}"
+ register: new_build
+
+ - name: Assert that a build has been created
+ assert:
+ that:
+ - new_build is changed
+ - new_build.builds.0.metadata.name == "{{ build_config }}-1"
+
+ - name: Start a new Build from previous Build
+ community.okd.openshift_build:
+ namespace: "{{ build_ns }}"
+ build_name: "{{ new_build.builds.0.metadata.name }}"
+ register: rerun_build
+
+ - name: Assert that another build has been created
+ assert:
+ that:
+ - rerun_build is changed
+ - rerun_build.builds.0.metadata.name == "{{ build_config }}-2"
+
+ - name: Cancel first build created
+ community.okd.openshift_build:
+ namespace: "{{ build_ns }}"
+ build_name: "{{ build_config }}-1"
+ state: cancelled
+ wait: yes
+ register: cancel
+
+ - name: Assert that the Build was cancelled
+ assert:
+ that:
+ - cancel is changed
+ - cancel.builds | length == 1
+ - cancel.builds.0.metadata.name == "{{ build_config }}-1"
+ - cancel.builds.0.metadata.namespace == "{{ build_ns }}"
+ - cancel.builds.0.status.cancelled
+
+ - name: Get Build info
+ kubernetes.core.k8s_info:
+ version: build.openshift.io/v1
+ kind: Build
+ namespace: "{{ build_ns }}"
+ name: "{{ cancel.builds.0.metadata.name }}"
+ register: build
+
+ - name: Assert that build phase is cancelled
+ assert:
+ that:
+ - build.resources | length == 1
+ - build.resources.0.status.cancelled
+ - build.resources.0.status.phase == 'Cancelled'
+
+ - name: Cancel and restart Build using build config name
+ community.okd.openshift_build:
+ namespace: "{{ build_ns }}"
+ build_config_name: "{{ build_config }}"
+ state: restarted
+ build_phases:
+ - Running
+ - New
+ register: restart
+
+ - name: assert that new build was created
+ assert:
+ that:
+ - restart is changed
+ - restart.builds | length == 1
+ - 'restart.builds.0.metadata.name == "{{ build_config }}-3"'
+
+ - name: Get Build 2 info
+ kubernetes.core.k8s_info:
+ version: build.openshift.io/v1
+ kind: Build
+ namespace: "{{ build_ns }}"
+ name: "{{ build_config }}-2"
+ register: build
+
+ - name: Assert that build phase is cancelled
+ assert:
+ that:
+ - build.resources | length == 1
+ - build.resources.0.status.cancelled
+ - build.resources.0.status.phase == 'Cancelled'
+
+ - name: Get Build info
+ kubernetes.core.k8s_info:
+ version: build.openshift.io/v1
+ kind: Build
+ namespace: "{{ build_ns }}"
+ name: "{{ build_config }}-3"
+ register: build
+
+ - name: Assert that Build is not cancelled
+ assert:
+ that:
+ - build.resources | length == 1
+ - '"cancelled" not in build.resources.0.status'
+ - "build.resources.0.status.phase in ('New', 'Pending', 'Running')"
+
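+ # keep_younger_than is expressed in minutes; builds completed within that
+ # window are kept, so the freshly created builds yield no prune candidates.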
+ - name: Prune Builds keep younger than 30min
+ community.okd.openshift_adm_prune_builds:
+ keep_younger_than: 30
+ namespace: "{{ build_ns }}"
+ register: prune
+ check_mode: yes
+
+ - name: Assert that no Builds were found
+ assert:
+ that:
+ - not prune.changed
+ - prune.builds | length == 0
+
+ - name: Prune Builds without namespace
+ community.okd.openshift_adm_prune_builds:
+ register: prune_without_ns
+ check_mode: yes
+
+ - name: Assert that completed builds are candidates for prune
+ assert:
+ that:
+ - prune_without_ns is changed
+ - prune_without_ns.builds | length > 0
+ - '"{{ build_config }}-1" in build_names'
+ - '"{{ build_config }}-2" in build_names'
+ vars:
+ build_names: '{{ prune_without_ns.builds | map(attribute="metadata") | flatten | map(attribute="name") | list }}'
+
+ - name: Prune Builds using namespace
+ community.okd.openshift_adm_prune_builds:
+ namespace: "{{ build_ns }}"
+ register: prune_with_ns
+ check_mode: yes
+
+ - name: Assert that prune operation found the completed builds
+ assert:
+ that:
+ - prune_with_ns is changed
+ - prune_with_ns.builds | length == 2
+
+ - name: Check Build before prune
+ kubernetes.core.k8s_info:
+ kind: Build
+ api_version: build.openshift.io/v1
+ name: "{{ build_config }}-1"
+ namespace: "{{ build_ns }}"
+ register: resource
+
+ - name: Validate that previous prune operations executed with check_mode did not delete the build
+ assert:
+ that:
+ - resource.resources | length == 1
+
+ - name: Execute prune operation
+ community.okd.openshift_adm_prune_builds:
+ namespace: "{{ build_ns }}"
+ register: prune
+
+ - name: assert prune is changed
+ assert:
+ that:
+ - prune is changed
+
+ - name: Check Build
+ kubernetes.core.k8s_info:
+ kind: Build
+ api_version: build.openshift.io/v1
+ name: "{{ build_config }}-1"
+ namespace: "{{ build_ns }}"
+ register: resource
+
+ - name: Assert that the Build does not exist anymore
+ assert:
+ that:
+ - resource.resources | length == 0
+
+ - name: Check Build
+ kubernetes.core.k8s_info:
+ kind: Build
+ api_version: build.openshift.io/v1
+ name: "{{ build_config }}-2"
+ namespace: "{{ build_ns }}"
+ register: resource
+
+ - name: Assert that the Build does not exist anymore
+ assert:
+ that:
+ - resource.resources | length == 0
+
+ always:
+ - name: Ensure namespace is deleted
+ kubernetes.core.k8s:
+ state: absent
+ kind: Namespace
+ name: "{{ build_ns }}"
+ ignore_errors: true
diff --git a/ansible_collections/community/okd/molecule/default/tasks/openshift_import_images.yml b/ansible_collections/community/okd/molecule/default/tasks/openshift_import_images.yml
new file mode 100644
index 000000000..04392bb26
--- /dev/null
+++ b/ansible_collections/community/okd/molecule/default/tasks/openshift_import_images.yml
@@ -0,0 +1,179 @@
+- name: OpenShift import image testing
+ block:
+
+ - set_fact:
+ test_ns: "import-images"
+
+ - name: Ensure namespace
+ community.okd.k8s:
+ kind: Namespace
+ name: '{{ test_ns }}'
+
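+ # Importing a name without an explicit tag ("ansible/awx") only imports the
+ # :latest tag into the target ImageStream.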
+ - name: Import image using tag (should import latest tag only)
+ community.okd.openshift_import_image:
+ namespace: "{{ test_ns }}"
+ name: "ansible/awx"
+ check_mode: yes
+ register: import_tag
+
+ - name: Assert only latest was imported
+ assert:
+ that:
+ - import_tag is changed
+ - import_tag.result | length == 1
+ - import_tag.result.0.spec.import
+ - import_tag.result.0.spec.images.0.from.kind == "DockerImage"
+ - import_tag.result.0.spec.images.0.from.name == "ansible/awx"
+
+ - name: check image stream
+ kubernetes.core.k8s_info:
+ kind: ImageStream
+ namespace: "{{ test_ns }}"
+ name: awx
+ register: resource
+
+ - name: assert that image stream is not created when using check_mode=yes
+ assert:
+ that:
+ - resource.resources == []
+
+ - name: Import image using tag (should import latest tag only)
+ community.okd.openshift_import_image:
+ namespace: "{{ test_ns }}"
+ name: "ansible/awx"
+ register: import_tag
+
+ - name: Assert only latest was imported
+ assert:
+ that:
+ - import_tag is changed
+
+ - name: check image stream
+ kubernetes.core.k8s_info:
+ kind: ImageStream
+ namespace: "{{ test_ns }}"
+ name: awx
+ register: resource
+
+ - name: assert that image stream contains only tag latest
+ assert:
+ that:
+ - resource.resources | length == 1
+ - resource.resources.0.status.tags.0.tag == 'latest'
+
+ - name: Import the latest tag once again
+ community.okd.openshift_import_image:
+ namespace: "{{ test_ns }}"
+ name: "ansible/awx"
+ register: import_tag
+
+ - name: assert change was performed
+ assert:
+ that:
+ - import_tag is changed
+
+ - name: check image stream
+ kubernetes.core.k8s_info:
+ kind: ImageStream
+ version: image.openshift.io/v1
+ namespace: "{{ test_ns }}"
+ name: awx
+ register: resource
+
+ - name: assert that image stream still contains a single tag
+ assert:
+ that:
+ - resource.resources | length == 1
+ - resource.resources.0.status.tags.0.tag == 'latest'
+
+ - name: Import another tag
+ community.okd.openshift_import_image:
+ namespace: "{{ test_ns }}"
+ name: "ansible/awx:17.1.0"
+ register: import_another_tag
+ ignore_errors: yes
+
+ - name: assert that importing another tag failed
+ assert:
+ that:
+ - import_another_tag is failed
+ - '"the tag 17.1.0 does not exist on the image stream" in import_another_tag.msg'
+
+ - name: Create simple ImageStream (without docker external container)
+ community.okd.k8s:
+ namespace: "{{ test_ns }}"
+ name: "local-is"
+ definition:
+ apiVersion: image.openshift.io/v1
+ kind: ImageStream
+ spec:
+ lookupPolicy:
+ local: false
+ tags: []
+
+ - name: Import all tags for an ImageStream not pointing to an external container image should fail
+ community.okd.openshift_import_image:
+ namespace: "{{ test_ns }}"
+ name: "local-is"
+ all: true
+ register: error_tag
+ ignore_errors: true
+ check_mode: yes
+
+ - name: Assert module cannot import tags for an ImageStream without external container images
+ assert:
+ that:
+ - error_tag is failed
+ - 'error_tag.msg == "image stream {{ test_ns }}/local-is does not have tags pointing to external container images"'
+
+ - name: import all tags for container image ibmcom/pause and specific tag for redhat/ubi8-micro
+ community.okd.openshift_import_image:
+ namespace: "{{ test_ns }}"
+ name:
+ - "ibmcom/pause"
+ - "redhat/ubi8-micro:8.5-437"
+ all: true
+ register: multiple_import
+
+ - name: Assert that import succeeded
+ assert:
+ that:
+ - multiple_import is changed
+ - multiple_import.result | length == 2
+
+ - name: Read ibmcom/pause ImageStream
+ kubernetes.core.k8s_info:
+ version: image.openshift.io/v1
+ kind: ImageStream
+ namespace: "{{ test_ns }}"
+ name: pause
+ register: pause
+
+ - name: assert that ibmcom/pause has multiple tags
+ assert:
+ that:
+ - pause.resources | length == 1
+ - pause.resources.0.status.tags | length > 1
+
+ - name: Read redhat/ubi8-micro ImageStream
+ kubernetes.core.k8s_info:
+ version: image.openshift.io/v1
+ kind: ImageStream
+ namespace: "{{ test_ns }}"
+ name: ubi8-micro
+ register: resource
+
+ - name: assert that redhat/ubi8-micro has only one tag
+ assert:
+ that:
+ - resource.resources | length == 1
+ - resource.resources.0.status.tags | length == 1
+ - 'resource.resources.0.status.tags.0.tag == "8.5-437"'
+
+ always:
+ - name: Delete testing namespace
+ community.okd.k8s:
+ state: absent
+ kind: Namespace
+ name: "{{ test_ns }}"
+ ignore_errors: yes
diff --git a/ansible_collections/community/okd/molecule/default/tasks/openshift_process.yml b/ansible_collections/community/okd/molecule/default/tasks/openshift_process.yml
new file mode 100644
index 000000000..4341bf21c
--- /dev/null
+++ b/ansible_collections/community/okd/molecule/default/tasks/openshift_process.yml
@@ -0,0 +1,183 @@
+---
+
+- name: Process a template in the cluster
+ community.okd.openshift_process:
+ name: nginx-example
+ namespace: openshift # only needed if using a template already on the server
+ parameters:
+ NAMESPACE: openshift
+ NAME: test123
+ register: result
+
+- name: Create the rendered resources
+ community.okd.k8s:
+ namespace: process-test
+ definition: '{{ item }}'
+ wait: yes
+ apply: yes
+ loop: '{{ result.resources }}'
+
+- name: Delete the rendered resources
+ community.okd.k8s:
+ namespace: process-test
+ definition: '{{ item }}'
+ wait: yes
+ state: absent
+ loop: '{{ result.resources }}'
+
+- name: Process a template and create the resources in the cluster
+ community.okd.openshift_process:
+ name: nginx-example
+ namespace: openshift # only needed if using a template already on the server
+ parameters:
+ NAMESPACE: openshift
+ NAME: test123
+ state: present
+ namespace_target: process-test
+ register: result
+
+- name: Process a template and update the resources in the cluster
+ community.okd.openshift_process:
+ name: nginx-example
+ namespace: openshift # only needed if using a template already on the server
+ parameters:
+ NAMESPACE: openshift
+ NAME: test123
+ MEMORY_LIMIT: 1Gi
+ state: present
+ namespace_target: process-test
+ register: result
+
+- name: Process a template and delete the resources in the cluster
+ community.okd.openshift_process:
+ name: nginx-example
+ namespace: openshift # only needed if using a template already on the server
+ parameters:
+ NAMESPACE: openshift
+ NAME: test123
+ state: absent
+ namespace_target: process-test
+ register: result
+
+- name: Process a template with parameters from an env file and create the resources
+ community.okd.openshift_process:
+ name: nginx-example
+ namespace: openshift
+ namespace_target: process-test
+ parameter_file: '{{ files_dir }}/nginx.env'
+ state: present
+ wait: yes
+
+- name: Process a template with parameters from an env file and delete the resources
+ community.okd.openshift_process:
+ name: nginx-example
+ namespace: openshift
+ namespace_target: process-test
+ parameter_file: '{{ files_dir }}/nginx.env'
+ state: absent
+ wait: yes
+
+
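+# Supplying the same parameter both inline (parameters) and via parameter_file
+# must be rejected; the assert below checks the resulting error message.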
+- name: Process a template with duplicate values
+ community.okd.openshift_process:
+ name: nginx-example
+ namespace: openshift # only needed if using a template already on the server
+ parameters:
+ NAME: test123
+ parameter_file: '{{ files_dir }}/nginx.env'
+ ignore_errors: yes
+ register: result
+
+- name: Assert the expected failure occurred
+ assert:
+ that:
+ - result.msg is defined
+ - result.msg == "Duplicate value for 'NAME' detected in parameter file"
+
+- name: Process a local template
+ community.okd.openshift_process:
+ src: '{{ files_dir }}/simple-template.yaml'
+ parameter_file: '{{ files_dir }}/example.env'
+ register: rendered
+
+- name: Process a local template and create the resources
+ community.okd.openshift_process:
+ src: '{{ files_dir }}/simple-template.yaml'
+ parameter_file: '{{ files_dir }}/example.env'
+ namespace_target: process-test
+ state: present
+ register: result
+
+- assert:
+ that: result is changed
+
+- name: Create the processed resources
+ community.okd.k8s:
+ namespace: process-test
+ definition: '{{ item }}'
+ loop: '{{ rendered.resources }}'
+ register: result
+
+- assert:
+ that: result is not changed
+
+- name: Process a local template and create the resources
+ community.okd.openshift_process:
+ definition: "{{ lookup('template', files_dir + '/simple-template.yaml') | from_yaml }}"
+ parameter_file: '{{ files_dir }}/example.env'
+ namespace_target: process-test
+ state: present
+ register: result
+
+- assert:
+ that: result is not changed
+
+- name: Get the created configmap
+ kubernetes.core.k8s_info:
+ api_version: v1
+ kind: ConfigMap
+ name: example
+ namespace: process-test
+ register: templated_cm
+
+- assert:
+ that:
+ - (templated_cm.resources | length) == 1
+ - templated_cm.resources.0.data.content is defined
+ - templated_cm.resources.0.data.content == "This is a long message that may take one or more lines to parse but should still work without issue"
+
+- name: Create the Template resource
+ community.okd.k8s:
+ src: '{{ files_dir }}/simple-template.yaml'
+ namespace: process-test
+
+- name: Process the template and create the resources
+ community.okd.openshift_process:
+ name: simple-example
+ namespace: process-test # only needed if using a template already on the server
+ namespace_target: process-test
+ parameter_file: '{{ files_dir }}/example.env'
+ state: present
+ register: result
+
+- assert:
+ that: result is not changed
+
+# Processing template without message
+- name: create template with file {{ files_dir }}/pod-template.yaml
+ kubernetes.core.k8s:
+ namespace: process-test
+ src: "{{ files_dir }}/pod-template.yaml"
+ state: present
+
+- name: Process pod template
+ community.okd.openshift_process:
+ name: pod-template
+ namespace: process-test
+ state: rendered
+ parameters:
+ NAME: ansible
+ register: rendered_template
+
+- assert:
+ that: rendered_template.message == ""
diff --git a/ansible_collections/community/okd/molecule/default/tasks/openshift_prune_images.yml b/ansible_collections/community/okd/molecule/default/tasks/openshift_prune_images.yml
new file mode 100644
index 000000000..86630da69
--- /dev/null
+++ b/ansible_collections/community/okd/molecule/default/tasks/openshift_prune_images.yml
@@ -0,0 +1,217 @@
+---
+- name: Read registry information
+ community.okd.openshift_registry_info:
+ check: yes
+ register: registry
+
+- name: Display registry information
+ debug: var=registry
+
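+# The whole scenario requires the integrated image registry to be exposed and
+# reachable; see the when conditions at the end of this block.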
+- block:
+ - set_fact:
+ prune_ns: "prune-images"
+ prune_registry: "{{ registry.public_hostname }}"
+ container:
+ name: "httpd"
+ from: "centos/python-38-centos7:20210629-304c7c8"
+ pod_name: "test-pod"
+
+ - name: Ensure namespace is created
+ community.okd.k8s:
+ kind: Namespace
+ name: "{{ prune_ns }}"
+
+ - name: Import image into internal registry
+ community.okd.openshift_import_image:
+ namespace: "{{ prune_ns }}"
+ name: "{{ container.name }}"
+ source: "{{ container.from }}"
+
+ - name: Create simple Pod
+ community.okd.k8s:
+ namespace: "{{ prune_ns }}"
+ wait: yes
+ definition:
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ name: "{{ pod_name }}"
+ spec:
+ containers:
+ - name: test-container
+ image: "{{ prune_registry }}/{{ prune_ns }}/{{ container.name }}:latest"
+ command:
+ - /bin/sh
+ - -c
+ - while true;do date;sleep 5; done
+
+ - name: Create limit range for images size
+ community.okd.k8s:
+ namespace: "{{ prune_ns }}"
+ definition:
+ kind: "LimitRange"
+ metadata:
+ name: "image-resource-limits"
+ spec:
+ limits:
+ - type: openshift.io/Image
+ max:
+ storage: 1Gi
+
+ - name: Prune images from namespace
+ community.okd.openshift_adm_prune_images:
+ registry_url: "{{ prune_registry }}"
+ namespace: "{{ prune_ns }}"
+ check_mode: yes
+ register: prune
+
+ - name: Assert that there is nothing to prune while the image is in use
+ assert:
+ that:
+ - prune is not changed
+ - prune is successful
+ - prune.deleted_images == []
+ - prune.updated_image_streams == []
+
+ - name: Delete Pod created before
+ community.okd.k8s:
+ state: absent
+ name: "{{ pod_name }}"
+ kind: Pod
+ namespace: "{{ prune_ns }}"
+ wait: yes
+
+ - name: Prune images from namespace
+ community.okd.openshift_adm_prune_images:
+ registry_url: "{{ prune_registry }}"
+ namespace: "{{ prune_ns }}"
+ check_mode: yes
+ register: prune
+
+ - name: Read ImageStream
+ kubernetes.core.k8s_info:
+ version: image.openshift.io/v1
+ kind: ImageStream
+ namespace: "{{ prune_ns }}"
+ name: "{{ container.name }}"
+ register: isinfo
+
+ - set_fact:
+ is_image_name: "{{ isinfo.resources.0.status.tags[0]['items'].0.image }}"
+
+ - name: Assert that the corresponding Image and ImageStream were candidates for pruning
+ assert:
+ that:
+ - prune is changed
+ - prune.deleted_images | length == 1
+ - prune.deleted_images.0.metadata.name == is_image_name
+ - prune.updated_image_streams | length == 1
+ - prune.updated_image_streams.0.metadata.name == container.name
+ - prune.updated_image_streams.0.metadata.namespace == prune_ns
+ - prune.updated_image_streams.0.status.tags == []
+
+ - name: Prune images from namespace keeping images and referrers younger than 60 minutes
+ community.okd.openshift_adm_prune_images:
+ registry_url: "{{ prune_registry }}"
+ namespace: "{{ prune_ns }}"
+ keep_younger_than: 60
+ check_mode: yes
+ register: younger
+
+ - assert:
+ that:
+ - younger is not changed
+ - younger is successful
+ - younger.deleted_images == []
+ - younger.updated_image_streams == []
+
+ - name: Prune images over size limit
+ community.okd.openshift_adm_prune_images:
+ registry_url: "{{ prune_registry }}"
+ namespace: "{{ prune_ns }}"
+ prune_over_size_limit: yes
+ check_mode: yes
+ register: prune_over_size
+
+ - assert:
+ that:
+ - prune_over_size is not changed
+ - prune_over_size is successful
+ - prune_over_size.deleted_images == []
+ - prune_over_size.updated_image_streams == []
+
+ - name: Update limit range for images size
+ community.okd.k8s:
+ namespace: "{{ prune_ns }}"
+ definition:
+ kind: "LimitRange"
+ metadata:
+ name: "image-resource-limits"
+ spec:
+ limits:
+ - type: openshift.io/Image
+ max:
+ storage: 1Ki
+
+ - name: Prune images over size limit (check_mode=yes)
+ community.okd.openshift_adm_prune_images:
+ registry_url: "{{ prune_registry }}"
+ namespace: "{{ prune_ns }}"
+ prune_over_size_limit: yes
+ check_mode: yes
+ register: prune
+
+ - name: Assert Images and ImageStream were candidates for prune
+ assert:
+ that:
+ - prune is changed
+ - prune.deleted_images | length == 1
+ - prune.deleted_images.0.metadata.name == is_image_name
+ - prune.updated_image_streams | length == 1
+ - prune.updated_image_streams.0.metadata.name == container.name
+ - prune.updated_image_streams.0.metadata.namespace == prune_ns
+ - prune.updated_image_streams.0.status.tags == []
+
+ - name: Prune images over size limit
+ community.okd.openshift_adm_prune_images:
+ registry_url: "{{ prune_registry }}"
+ namespace: "{{ prune_ns }}"
+ prune_over_size_limit: yes
+ register: prune
+
+ - name: Assert that Images and ImageStream were pruned
+ assert:
+ that:
+ - prune is changed
+ - prune.deleted_images | length == 1
+ - prune.deleted_images.0.details.name == is_image_name
+ - prune.updated_image_streams | length == 1
+ - prune.updated_image_streams.0.metadata.name == container.name
+ - prune.updated_image_streams.0.metadata.namespace == prune_ns
+ - '"tags" not in prune.updated_image_streams.0.status'
+
+ - name: Validate that ImageStream was updated
+ kubernetes.core.k8s_info:
+ version: image.openshift.io/v1
+ kind: ImageStream
+ namespace: "{{ prune_ns }}"
+ name: "{{ container.name }}"
+ register: stream
+
+ - name: Assert that ImageStream was updated
+ assert:
+ that:
+ - stream.resources | length == 1
+ - '"tags" not in stream.resources.0.status'
+
+ always:
+ - name: Delete namespace
+ community.okd.k8s:
+ name: "{{ prune_ns }}"
+ kind: Namespace
+ state: absent
+ ignore_errors: true
+
+ when:
+ - registry.public_hostname
+ - registry.check.reached
diff --git a/ansible_collections/community/okd/molecule/default/tasks/openshift_route.yml b/ansible_collections/community/okd/molecule/default/tasks/openshift_route.yml
new file mode 100644
index 000000000..50056b7e4
--- /dev/null
+++ b/ansible_collections/community/okd/molecule/default/tasks/openshift_route.yml
@@ -0,0 +1,275 @@
+---
+- name: Create Deployment
+ community.okd.k8s:
+ wait: yes
+ definition:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: hello-kubernetes
+ namespace: default
+ spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ app: hello-kubernetes
+ template:
+ metadata:
+ labels:
+ app: hello-kubernetes
+ spec:
+ containers:
+ - name: hello-kubernetes
+ image: docker.io/openshift/hello-openshift
+ ports:
+ - containerPort: 8080
+
+- name: Create Service
+ community.okd.k8s:
+ wait: yes
+ definition:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: hello-kubernetes
+ namespace: default
+ spec:
+ ports:
+ - port: 80
+ targetPort: 8080
+ selector:
+ app: hello-kubernetes
+
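+# With only service and namespace set, the module derives the route name (and
+# a target port) from the Service it exposes.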
+- name: Create Route with fewest possible arguments
+ community.okd.openshift_route:
+ service: hello-kubernetes
+ namespace: default
+ register: route
+
+- name: Attempt to hit http URL
+ uri:
+ url: 'http://{{ route.result.spec.host }}'
+ return_content: yes
+ until: result is successful
+ retries: 20
+ register: result
+
+- name: Assert the page content is as expected
+ assert:
+ that:
+ - not result.redirected
+ - result.status == 200
+ - result.content == 'Hello OpenShift!\n'
+
+- name: Delete route
+ community.okd.openshift_route:
+ name: '{{ route.result.metadata.name }}'
+ namespace: default
+ state: absent
+ wait: yes
+
+- name: Create Route with custom name and wait
+ community.okd.openshift_route:
+ service: hello-kubernetes
+ namespace: default
+ name: test1
+ wait: yes
+ register: route
+
+- name: Assert that the condition is properly set
+ assert:
+ that:
+ - route.duration is defined
+ - route.result.status.ingress.0.conditions.0.type == 'Admitted'
+ - route.result.status.ingress.0.conditions.0.status == 'True'
+
+- name: Attempt to hit http URL
+ uri:
+ url: 'http://{{ route.result.spec.host }}'
+ return_content: yes
+ until: result is successful
+ retries: 20
+ register: result
+
+- name: Assert the page content is as expected
+ assert:
+ that:
+ - not result.redirected
+ - result.status == 200
+ - result.content == 'Hello OpenShift!\n'
+
+- name: Delete route
+ community.okd.openshift_route:
+ name: '{{ route.result.metadata.name }}'
+ namespace: default
+ state: absent
+ wait: yes
+
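+# With edge termination, TLS ends at the router; insecure_policy controls what
+# happens to plain-HTTP requests (allow, redirect, or disallow).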
+- name: Create edge-terminated route that allows insecure traffic
+ community.okd.openshift_route:
+ service: hello-kubernetes
+ namespace: default
+ name: hello-kubernetes-https
+ tls:
+ insecure_policy: allow
+ termination: edge
+ register: route
+
+- name: Attempt to hit http URL
+ uri:
+ url: 'http://{{ route.result.spec.host }}'
+ return_content: yes
+ until: result is successful
+ retries: 20
+ register: result
+
+- name: Assert the page content is as expected
+ assert:
+ that:
+ - not result.redirected
+ - result.status == 200
+ - result.content == 'Hello OpenShift!\n'
+
+- name: Attempt to hit https URL
+ uri:
+ url: 'https://{{ route.result.spec.host }}'
+ validate_certs: no
+ return_content: yes
+ until: result is successful
+ retries: 10
+ register: result
+
+- name: Assert the page content is as expected
+ assert:
+ that:
+ - not result.redirected
+ - result.status == 200
+ - result.content == 'Hello OpenShift!\n'
+
+- name: Alter edge-terminated route to redirect insecure traffic
+ community.okd.openshift_route:
+ service: hello-kubernetes
+ namespace: default
+ name: hello-kubernetes-https
+ tls:
+ insecure_policy: redirect
+ termination: edge
+ register: route
+
+- name: Attempt to hit http URL
+ uri:
+ url: 'http://{{ route.result.spec.host }}'
+ return_content: yes
+ validate_certs: no
+ until:
+ - result is successful
+ - result.redirected
+ retries: 10
+ register: result
+
+- name: Assert the page content is as expected
+ assert:
+ that:
+ - result.redirected
+ - result.status == 200
+ - result.content == 'Hello OpenShift!\n'
+
+- name: Attempt to hit https URL
+ uri:
+ url: 'https://{{ route.result.spec.host }}'
+ validate_certs: no
+ return_content: yes
+ until: result is successful
+ retries: 20
+ register: result
+
+- name: Assert the page content is as expected
+ assert:
+ that:
+ - not result.redirected
+ - result.status == 200
+ - result.content == 'Hello OpenShift!\n'
+
+- name: Alter edge-terminated route with insecure traffic disabled
+ community.okd.openshift_route:
+ service: hello-kubernetes
+ namespace: default
+ name: hello-kubernetes-https
+ tls:
+ insecure_policy: disallow
+ termination: edge
+ register: route
+
+- debug: var=route
+
+- name: Attempt to hit https URL
+ uri:
+ url: 'https://{{ route.result.spec.host }}'
+ validate_certs: no
+ return_content: yes
+ until: result is successful
+ retries: 20
+ register: result
+
+- name: Assert the page content is as expected
+ assert:
+ that:
+ - not result.redirected
+ - result.status == 200
+ - result.content == 'Hello OpenShift!\n'
+
+- name: Attempt to hit http URL
+ uri:
+ url: 'http://{{ route.result.spec.host }}'
+ status_code: 503
+ until: result is successful
+ retries: 20
+ register: result
+
+- debug: var=result
+
+- name: Assert the page content is as expected
+ assert:
+ that:
+ - not result.redirected
+ - result.status == 503
+
+- name: Delete route
+ community.okd.openshift_route:
+ name: '{{ route.result.metadata.name }}'
+ namespace: default
+ state: absent
+ wait: yes
+
+# Route with labels and annotations
+- name: Create route with labels and annotations
+ community.okd.openshift_route:
+ service: hello-kubernetes
+ namespace: default
+ name: route-label-annotation
+ labels:
+ ansible: test
+ annotations:
+ haproxy.router.openshift.io/balance: roundrobin
+
+- name: Get route information
+ kubernetes.core.k8s_info:
+ api_version: route.openshift.io/v1
+ kind: Route
+ name: route-label-annotation
+ namespace: default
+ register: route
+
+- assert:
+ that:
+ - route.resources[0].metadata.annotations is defined
+ - '"haproxy.router.openshift.io/balance" in route.resources[0].metadata.annotations'
+ - route.resources[0].metadata.labels is defined
+ - '"ansible" in route.resources[0].metadata.labels'
+
+- name: Delete route
+ community.okd.openshift_route:
+ name: route-label-annotation
+ namespace: default
+ state: absent
+ wait: yes
diff --git a/ansible_collections/community/okd/molecule/default/tasks/validate_installed.yml b/ansible_collections/community/okd/molecule/default/tasks/validate_installed.yml
new file mode 100644
index 000000000..4508efdd6
--- /dev/null
+++ b/ansible_collections/community/okd/molecule/default/tasks/validate_installed.yml
@@ -0,0 +1,122 @@
+---
+- block:
+ - name: Create a project
+ community.okd.k8s:
+ name: "{{ playbook_namespace }}"
+ kind: Project
+ api_version: project.openshift.io/v1
+
+ - name: incredibly simple ConfigMap
+ community.okd.k8s:
+ definition:
+ apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: hello
+ namespace: "{{ playbook_namespace }}"
+ validate:
+ fail_on_error: yes
+ register: k8s_with_validate
+
+ - name: assert that k8s_with_validate succeeds
+ assert:
+ that:
+ - k8s_with_validate is successful
+
+ - name: extra property does not fail without strict
+ community.okd.k8s:
+ src: "files/kuard-extra-property.yml"
+ namespace: "{{ playbook_namespace }}"
+ validate:
+ fail_on_error: yes
+ strict: no
+
+ - name: extra property fails with strict
+ community.okd.k8s:
+ src: "files/kuard-extra-property.yml"
+ namespace: "{{ playbook_namespace }}"
+ validate:
+ fail_on_error: yes
+ strict: yes
+ ignore_errors: yes
+ register: extra_property
+
+ - name: check that extra property fails with strict
+ assert:
+ that:
+ - extra_property is failed
+
+ - name: invalid type fails at validation stage
+ community.okd.k8s:
+ src: "files/kuard-invalid-type.yml"
+ namespace: "{{ playbook_namespace }}"
+ validate:
+ fail_on_error: yes
+ strict: no
+ ignore_errors: yes
+ register: invalid_type
+
+ - name: check that invalid type fails
+ assert:
+ that:
+ - invalid_type is failed
+
+ - name: invalid type fails with warnings when fail_on_error is False
+ community.okd.k8s:
+ src: "files/kuard-invalid-type.yml"
+ namespace: "{{ playbook_namespace }}"
+ validate:
+ fail_on_error: no
+ strict: no
+ ignore_errors: yes
+ register: invalid_type_no_fail
+
+ - name: check that invalid type fails
+ assert:
+ that:
+ - invalid_type_no_fail is failed
+
+ - name: setup custom resource definition
+ community.okd.k8s:
+ src: "files/setup-crd.yml"
+
+ - name: wait a few seconds
+ pause:
+ seconds: 5
+
+ - name: add custom resource definition
+ community.okd.k8s:
+ src: "files/crd-resource.yml"
+ namespace: "{{ playbook_namespace }}"
+ validate:
+ fail_on_error: yes
+ strict: yes
+ register: unknown_kind
+
+ - name: check that unknown kind warns
+ assert:
+ that:
+ - unknown_kind is successful
+
+ always:
+ - name: remove custom resource
+ community.okd.k8s:
+ definition: "{{ lookup('file', 'files/crd-resource.yml') }}"
+ namespace: "{{ playbook_namespace }}"
+ state: absent
+ ignore_errors: yes
+
+ - name: remove custom resource definitions
+ community.okd.k8s:
+ definition: "{{ lookup('file', 'files/setup-crd.yml') }}"
+ state: absent
+
+ - name: Delete namespace
+ community.okd.k8s:
+ state: absent
+ definition:
+ - kind: Project
+ apiVersion: project.openshift.io/v1
+ metadata:
+ name: "{{ playbook_namespace }}"
+ ignore_errors: yes
diff --git a/ansible_collections/community/okd/molecule/default/tasks/validate_not_installed.yml b/ansible_collections/community/okd/molecule/default/tasks/validate_not_installed.yml
new file mode 100644
index 000000000..a64607ce0
--- /dev/null
+++ b/ansible_collections/community/okd/molecule/default/tasks/validate_not_installed.yml
@@ -0,0 +1,25 @@
+---
+# TODO: Not available in ansible-base
+# - python_requirements_info:
+# dependencies:
+# - openshift
+# - kubernetes
+# - kubernetes-validate
+
+- community.okd.k8s:
+ definition:
+ apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: hello
+ namespace: default
+ validate:
+ fail_on_error: yes
+ ignore_errors: yes
+ register: k8s_no_validate
+
+- name: assert that k8s_no_validate fails gracefully
+ assert:
+ that:
+ - k8s_no_validate is failed
+ - k8s_no_validate.msg.startswith('Failed to import the required Python library (kubernetes-validate)')