summaryrefslogtreecommitdiffstats
path: root/collections-debian-merged/ansible_collections/community/kubernetes/molecule
diff options
context:
space:
mode:
Diffstat (limited to 'collections-debian-merged/ansible_collections/community/kubernetes/molecule')
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/converge.yml119
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/molecule.yml38
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/defaults/main.yml15
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/files/appversionless-chart/Chart.yaml5
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/files/test-chart/Chart.yaml6
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/files/values.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/install.yml11
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/main.yml7
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/run_test.yml35
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/test_helm_not_installed.yml15
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_chart.yml287
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_chart/from_local_path.yml88
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_chart/from_repository.yml19
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_chart/from_url.yml7
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_helm_plugin.yml84
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_repository.yml61
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/access_review.yml22
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/append_hash.yml69
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/apply.yml769
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/cluster_info.yml22
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/crd.yml66
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/delete.yml95
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/exec.yml64
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/full.yml373
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/info.yml167
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/lists.yml139
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/log.yml124
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/rollback.yml217
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/scale.yml210
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/template.yml167
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/waiter.yml363
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/templates/pod_template_one.j216
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/templates/pod_template_three.j235
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/templates/pod_template_two.j216
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/vars/main.yml40
35 files changed, 3773 insertions, 0 deletions
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/converge.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/converge.yml
new file mode 100644
index 00000000..f6dcb454
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/converge.yml
@@ -0,0 +1,119 @@
+---
+- name: Converge
+ hosts: localhost
+ connection: local
+
+ collections:
+ - community.kubernetes
+
+ vars_files:
+ - vars/main.yml
+
+ tasks:
+ - name: Verify cluster is working.
+ k8s_info:
+ namespace: kube-system
+ kind: Pod
+ register: pod_list
+
+ - name: Verify cluster has more than 5 pods running.
+ assert:
+ that: (pod_list.resources | count) > 5
+
+ - include_tasks: tasks/delete.yml
+ - include_tasks: tasks/scale.yml
+ - include_tasks: tasks/apply.yml
+ - include_tasks: tasks/waiter.yml
+ - include_tasks: tasks/full.yml
+ - include_tasks: tasks/exec.yml
+ - include_tasks: tasks/log.yml
+ - include_tasks: tasks/cluster_info.yml
+ - include_tasks: tasks/access_review.yml
+ - include_tasks: tasks/rollback.yml
+
+ roles:
+ - helm
+
+ post_tasks:
+ - name: Ensure namespace exists
+ k8s:
+ api_version: v1
+ kind: Namespace
+ name: inventory
+
+ - name: Add a deployment
+ k8s:
+ definition:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: inventory
+ namespace: inventory
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: "{{ k8s_pod_name }}"
+ template: "{{ k8s_pod_template }}"
+ wait: yes
+ wait_timeout: 120
+ vars:
+ k8s_pod_name: inventory
+ k8s_pod_image: python
+ k8s_pod_command:
+ - python
+ - '-m'
+ - http.server
+ k8s_pod_env:
+ - name: TEST
+ value: test
+
+ - meta: refresh_inventory
+
+- name: Verify inventory and connection plugins
+ hosts: namespace_inventory_pods
+ gather_facts: no
+
+ vars:
+ file_content: |
+ Hello world
+
+ tasks:
+ - name: End play if host not running (TODO should we not add these to the inventory?)
+ meta: end_host
+ when: pod_phase != "Running"
+
+ - debug: var=hostvars
+ - setup:
+
+ - debug: var=ansible_facts
+
+ - name: Assert the TEST environment variable was retrieved
+ assert:
+ that: ansible_facts.env.TEST == 'test'
+
+ - name: Copy a file into the host
+ copy:
+ content: '{{ file_content }}'
+ dest: /tmp/test_file
+
+ - name: Retrieve the file from the host
+ slurp:
+ src: /tmp/test_file
+ register: slurped_file
+
+ - name: Assert the file content matches expectations
+ assert:
+ that: (slurped_file.content|b64decode) == file_content
+
+- name: Delete inventory namespace
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: Remove inventory namespace
+ k8s:
+ api_version: v1
+ kind: Namespace
+ name: inventory
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/molecule.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/molecule.yml
new file mode 100644
index 00000000..693cd351
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/molecule.yml
@@ -0,0 +1,38 @@
+---
+driver:
+ name: delegated
+ options:
+ managed: false
+ login_cmd_template: 'docker exec -ti {instance} bash'
+ ansible_connection_options:
+ ansible_connection: docker
+lint: |
+ set -e
+ yamllint .
+ flake8
+platforms:
+ - name: instance-kind
+provisioner:
+ name: ansible
+ log: true
+ config_options:
+ inventory:
+ enable_plugins: community.kubernetes.k8s
+ lint: {}
+ inventory:
+ hosts:
+ plugin: community.kubernetes.k8s
+ host_vars:
+ localhost:
+ ansible_python_interpreter: '{{ ansible_playbook_python }}'
+ env:
+ ANSIBLE_FORCE_COLOR: 'true'
+ options:
+ vvv: True
+scenario:
+ name: default
+ test_sequence:
+ - lint
+ - syntax
+ - converge
+ - verify
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/defaults/main.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/defaults/main.yml
new file mode 100644
index 00000000..b5a2a31f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/defaults/main.yml
@@ -0,0 +1,15 @@
+---
+helm_archive_name: "helm-{{ helm_version }}-{{ ansible_system | lower }}-amd64.tar.gz"
+helm_binary: "/tmp/helm/{{ ansible_system | lower }}-amd64/helm"
+helm_namespace: helm
+
+tiller_namespace: tiller
+tiller_cluster_role: cluster-admin
+
+chart_test: "nginx-ingress"
+chart_test_version: 1.32.0
+chart_test_version_upgrade: 1.33.0
+chart_test_repo: "https://kubernetes-charts.storage.googleapis.com"
+chart_test_git_repo: "http://github.com/helm/charts.git"
+chart_test_values:
+ revisionHistoryLimit: 0
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/files/appversionless-chart/Chart.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/files/appversionless-chart/Chart.yaml
new file mode 100644
index 00000000..c308a00a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/files/appversionless-chart/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v2
+name: appversionless-chart
+description: A chart used in molecule tests
+type: application
+version: 0.1.0
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/files/test-chart/Chart.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/files/test-chart/Chart.yaml
new file mode 100644
index 00000000..5d09a08c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/files/test-chart/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: test-chart
+description: A chart used in molecule tests
+type: application
+version: 0.1.0
+appVersion: "default"
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/files/values.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/files/values.yaml
new file mode 100644
index 00000000..7b057068
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/files/values.yaml
@@ -0,0 +1,2 @@
+---
+revisionHistoryLimit: 0
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/install.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/install.yml
new file mode 100644
index 00000000..8030aac7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/install.yml
@@ -0,0 +1,11 @@
+---
+- name: Init Helm folders
+ file:
+ path: /tmp/helm/
+ state: directory
+
+- name: Unarchive Helm binary
+ unarchive:
+ src: 'https://get.helm.sh/{{ helm_archive_name }}'
+ dest: /tmp/helm/
+ remote_src: yes
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/main.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/main.yml
new file mode 100644
index 00000000..e86d33df
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/main.yml
@@ -0,0 +1,7 @@
+---
+- name: Run tests
+ include_tasks: run_test.yml
+ loop_control:
+ loop_var: helm_version
+ with_items:
+ - "v3.2.4"
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/run_test.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/run_test.yml
new file mode 100644
index 00000000..0384a2e4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/run_test.yml
@@ -0,0 +1,35 @@
+---
+- name: Ensure helm is not installed
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - "/tmp/helm"
+
+- name: Check failed if helm is not installed
+ include_tasks: test_helm_not_installed.yml
+
+- name: "Install {{ helm_version }}"
+ include_tasks: install.yml
+
+- name: tests_repository
+ include_tasks: tests_repository.yml
+
+- name: Deploy charts
+ include_tasks: "tests_chart/{{ test_chart_type }}.yml"
+ loop_control:
+ loop_var: test_chart_type
+ with_items:
+ - from_local_path
+ - from_repository
+ - from_url
+
+- name: Test helm plugin
+ include_tasks: tests_helm_plugin.yml
+
+- name: Clean helm install
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - "/tmp/helm/"
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/test_helm_not_installed.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/test_helm_not_installed.yml
new file mode 100644
index 00000000..0832dcb0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/test_helm_not_installed.yml
@@ -0,0 +1,15 @@
+---
+- name: Failed test when helm is not installed
+ helm:
+ binary_path: "{{ helm_binary}}_fake"
+ name: test
+ chart_ref: "{{ chart_test }}"
+ namespace: "{{ helm_namespace }}"
+ ignore_errors: yes
+ register: helm_missing_binary
+
+- name: Assert that helm is not installed
+ assert:
+ that:
+ - helm_missing_binary is failed
+ - "'No such file or directory' in helm_missing_binary.msg"
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_chart.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_chart.yml
new file mode 100644
index 00000000..bb600f9b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_chart.yml
@@ -0,0 +1,287 @@
+---
+- name: Check helm_info empty
+ helm_info:
+ binary_path: "{{ helm_binary }}"
+ name: test
+ namespace: "{{ helm_namespace }}"
+ register: empty_info
+
+- name: "Assert that no charts are installed with helm_info"
+ assert:
+ that:
+ - empty_info.status is undefined
+
+- name: "Install fail {{ chart_test }} from {{ source }}"
+ helm:
+ binary_path: "{{ helm_binary }}"
+ name: test
+ chart_ref: "{{ chart_source }}"
+ chart_version: "{{ chart_source_version | default(omit) }}"
+ namespace: "{{ helm_namespace }}"
+ ignore_errors: yes
+ register: install_fail
+
+- name: "Assert that Install fail {{ chart_test }} from {{ source }}"
+ assert:
+ that:
+ - install_fail is failed
+ - "'Error: create: failed to create: namespaces \"' + helm_namespace + '\" not found' in install_fail.stderr"
+
+- name: "Install {{ chart_test }} from {{ source }}"
+ helm:
+ binary_path: "{{ helm_binary }}"
+ name: test
+ chart_ref: "{{ chart_source }}"
+ chart_version: "{{ chart_source_version | default(omit) }}"
+ namespace: "{{ helm_namespace }}"
+ create_namespace: true
+ register: install
+
+- name: "Assert that {{ chart_test }} chart is installed from {{ source }}"
+ assert:
+ that:
+ - install is changed
+ - install.status.chart == "{{ chart_test }}-{{ chart_test_version }}"
+ - install.status.status | lower == 'deployed'
+
+- name: Check helm_info content
+ helm_info:
+ binary_path: "{{ helm_binary }}"
+ name: test
+ namespace: "{{ helm_namespace }}"
+ register: content_info
+
+- name: "Assert that {{ chart_test }} is installed from {{ source }} with helm_info"
+ assert:
+ that:
+ - content_info.status.chart == "{{ chart_test }}-{{ chart_test_version }}"
+ - content_info.status.status | lower == 'deployed'
+
+- name: Check idempotency
+ helm:
+ binary_path: "{{ helm_binary }}"
+ name: test
+ chart_ref: "{{ chart_source }}"
+ chart_version: "{{ chart_source_version | default(omit) }}"
+ namespace: "{{ helm_namespace }}"
+ register: install
+
+- name: Assert idempotency
+ assert:
+ that:
+ - install is not changed
+ - install.status.chart == "{{ chart_test }}-{{ chart_test_version }}"
+ - install.status.status | lower == 'deployed'
+
+- name: "Add vars to {{ chart_test }} from {{ source }}"
+ helm:
+ binary_path: "{{ helm_binary }}"
+ name: test
+ chart_ref: "{{ chart_source }}"
+ chart_version: "{{ chart_source_version | default(omit) }}"
+ namespace: "{{ helm_namespace }}"
+ values: "{{ chart_test_values }}"
+ register: install
+
+- name: "Assert that {{ chart_test }} chart is upgraded with new var from {{ source }}"
+ assert:
+ that:
+ - install is changed
+ - install.status.status | lower == 'deployed'
+ - install.status.chart == "{{ chart_test }}-{{ chart_test_version }}"
+ - "install.status['values'].revisionHistoryLimit == 0"
+
+- name: Check idempotency after adding vars
+ helm:
+ binary_path: "{{ helm_binary }}"
+ name: test
+ chart_ref: "{{ chart_source }}"
+ chart_version: "{{ chart_source_version | default(omit) }}"
+ namespace: "{{ helm_namespace }}"
+ values: "{{ chart_test_values }}"
+ register: install
+
+- name: Assert idempotency after add vars
+ assert:
+ that:
+ - install is not changed
+ - install.status.status | lower == 'deployed'
+ - install.status.chart == "{{ chart_test }}-{{ chart_test_version }}"
+ - "install.status['values'].revisionHistoryLimit == 0"
+
+- name: "Remove Vars to {{ chart_test }} from {{ source }}"
+ helm:
+ binary_path: "{{ helm_binary }}"
+ name: test
+ chart_ref: "{{ chart_source }}"
+ chart_version: "{{ chart_source_version | default(omit) }}"
+ namespace: "{{ helm_namespace }}"
+ register: install
+
+- name: "Assert that {{ chart_test }} chart is upgraded with new var from {{ source }}"
+ assert:
+ that:
+ - install is changed
+ - install.status.status | lower == 'deployed'
+ - install.status.chart == "{{ chart_test }}-{{ chart_test_version }}"
+ - install.status['values'] == {}
+
+- name: Check idempotency after removing vars
+ helm:
+ binary_path: "{{ helm_binary }}"
+ name: test
+ chart_ref: "{{ chart_source }}"
+ chart_version: "{{ chart_source_version | default(omit) }}"
+ namespace: "{{ helm_namespace }}"
+ register: install
+
+- name: Assert idempotency after removing vars
+ assert:
+ that:
+ - install is not changed
+ - install.status.status | lower == 'deployed'
+ - install.status.chart == "{{ chart_test }}-{{ chart_test_version }}"
+ - install.status['values'] == {}
+
+- name: "Upgrade {{ chart_test }} from {{ source }}"
+ helm:
+ binary_path: "{{ helm_binary }}"
+ name: test
+ chart_ref: "{{ chart_source_upgrade | default(chart_source) }}"
+ chart_version: "{{ chart_source_version_upgrade | default(omit) }}"
+ namespace: "{{ helm_namespace }}"
+ register: install
+
+- name: "Assert that {{ chart_test }} chart is upgraded with new version from {{ source }}"
+ assert:
+ that:
+ - install is changed
+ - install.status.status | lower == 'deployed'
+ - install.status.chart == "{{ chart_test }}-{{ chart_test_version_upgrade }}"
+
+- name: Check idempotency after upgrade
+ helm:
+ binary_path: "{{ helm_binary }}"
+ name: test
+ chart_ref: "{{ chart_source_upgrade | default(chart_source) }}"
+ chart_version: "{{ chart_source_version_upgrade | default(omit) }}"
+ namespace: "{{ helm_namespace }}"
+ register: install
+
+- name: Assert idempotency after upgrade
+ assert:
+ that:
+ - install is not changed
+ - install.status.status | lower == 'deployed'
+ - install.status.chart == "{{ chart_test }}-{{ chart_test_version_upgrade }}"
+
+- name: "Remove {{ chart_test }} from {{ source }}"
+ helm:
+ binary_path: "{{ helm_binary }}"
+ state: absent
+ name: test
+ namespace: "{{ helm_namespace }}"
+ register: install
+
+- name: "Assert that {{ chart_test }} chart is removed from {{ source }}"
+ assert:
+ that:
+ - install is changed
+
+- name: Check idempotency after remove
+ helm:
+ binary_path: "{{ helm_binary }}"
+ state: absent
+ name: test
+ namespace: "{{ helm_namespace }}"
+ register: install
+
+- name: Assert idempotency
+ assert:
+ that:
+ - install is not changed
+
+# Test --replace
+- name: Install chart for replace option
+ helm:
+ binary_path: "{{ helm_binary }}"
+ name: test-0001
+ chart_ref: "{{ chart_source }}"
+ chart_version: "{{ chart_source_version | default(omit) }}"
+ namespace: "{{ helm_namespace }}"
+ register: install
+
+- name: "Assert that {{ chart_test }} chart is installed from {{ source }}"
+ assert:
+ that:
+ - install is changed
+
+- name: Remove {{ chart_test }} with --purge
+ helm:
+ binary_path: "{{ helm_binary }}"
+ state: absent
+ name: test-0001
+ purge: False
+ namespace: "{{ helm_namespace }}"
+ register: install
+
+- name: Check if chart is removed
+ assert:
+ that:
+ - install is changed
+
+- name: Install chart again with same name test-0001
+ helm:
+ binary_path: "{{ helm_binary }}"
+ name: test-0001
+ chart_ref: "{{ chart_source }}"
+ chart_version: "{{ chart_source_version | default(omit) }}"
+ namespace: "{{ helm_namespace }}"
+ replace: True
+ register: install
+
+- name: "Assert that {{ chart_test }} chart is installed from {{ source }}"
+ assert:
+ that:
+ - install is changed
+
+- name: Remove {{ chart_test }} (cleanup)
+ helm:
+ binary_path: "{{ helm_binary }}"
+ state: absent
+ name: test-0001
+ namespace: "{{ helm_namespace }}"
+ register: install
+
+- name: Check if chart is removed
+ assert:
+ that:
+ - install is changed
+
+- name: "Install {{ chart_test }} from {{ source }} with values_files"
+ helm:
+ binary_path: "{{ helm_binary }}"
+ name: test
+ chart_ref: "{{ chart_source }}"
+ chart_version: "{{ chart_source_version | default(omit) }}"
+ namespace: "{{ helm_namespace }}"
+ values_files:
+ - "{{ role_path }}/files/values.yaml"
+ register: install
+
+- name: "Assert that {{ chart_test }} chart has var from {{ source }}"
+ assert:
+ that:
+ - install is changed
+ - install.status.status | lower == 'deployed'
+ - install.status.chart == "{{ chart_test }}-{{ chart_test_version }}"
+ - "install.status['values'].revisionHistoryLimit == 0"
+
+- name: Remove helm namespace
+ k8s:
+ api_version: v1
+ kind: Namespace
+ name: "{{ helm_namespace }}"
+ state: absent
+ wait: true
+ wait_timeout: 180
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_chart/from_local_path.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_chart/from_local_path.yml
new file mode 100644
index 00000000..58409809
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_chart/from_local_path.yml
@@ -0,0 +1,88 @@
+---
+- name: Git clone stable repo
+ git:
+ repo: "{{ chart_test_git_repo }}"
+ dest: /tmp/helm_test_repo
+ version: 631eb8413f6728962439488f48d7d6fbb954a6db
+
+- name: Git clone stable repo upgrade
+ git:
+ repo: "{{ chart_test_git_repo }}"
+ dest: /tmp/helm_test_repo_upgrade
+ version: d37b5025ffc8be49699898369fbb59661e2a8ffb
+
+- name: Install Chart from local path
+ include_tasks: "../tests_chart.yml"
+ vars:
+ source: local_path
+ chart_source: "/tmp/helm_test_repo/stable/{{ chart_test }}/"
+ chart_source_upgrade: "/tmp/helm_test_repo_upgrade/stable/{{ chart_test }}/"
+
+- name: Test appVersion idempotence
+ vars:
+ chart_test: "test-chart"
+ chart_test_version: "0.1.0"
+ chart_test_version_upgrade: "0.1.0"
+ chart_test_app_version: "v1"
+ chart_test_upgrade_app_version: "v2"
+ block:
+ - name: Copy test chart
+ copy:
+ src: "{{ chart_test }}"
+ dest: "/tmp/helm_test_appversion/test-chart/"
+
+ # create package with appVersion v1
+ - name: "Package chart into archive with appVersion {{ chart_test_app_version }}"
+ command: "{{ helm_binary }} package --app-version {{ chart_test_app_version }} /tmp/helm_test_appversion/test-chart/{{ chart_test }}"
+ - name: "Move appVersion {{ chart_test_app_version }} chart archive"
+ copy:
+ remote_src: true
+ src: "test-chart-{{ chart_test_version }}.tgz"
+ dest: "/tmp/helm_test_appversion/test-chart/{{ chart_test }}-{{ chart_test_app_version }}-{{ chart_test_version }}.tgz"
+
+ # create package with appVersion v2
+ - name: "Package chart into archive with appVersion {{ chart_test_upgrade_app_version }}"
+ command: "{{ helm_binary }} package --app-version {{ chart_test_upgrade_app_version }} /tmp/helm_test_appversion/test-chart/{{ chart_test }}"
+ - name: "Move appVersion {{ chart_test_upgrade_app_version }} chart archive"
+ copy:
+ remote_src: true
+ src: "test-chart-{{ chart_test_version }}.tgz"
+ dest: "/tmp/helm_test_appversion/test-chart/{{ chart_test }}-{{ chart_test_upgrade_app_version }}-{{ chart_test_version }}.tgz"
+
+ - name: Install Chart from local path
+ include_tasks: "../tests_chart.yml"
+ vars:
+ source: local_path
+ chart_source: "/tmp/helm_test_appversion/test-chart/{{ chart_test }}-{{ chart_test_app_version }}-{{ chart_test_version }}.tgz"
+ chart_source_upgrade: "/tmp/helm_test_appversion/test-chart/{{ chart_test }}-{{ chart_test_upgrade_app_version }}-{{ chart_test_version }}.tgz"
+
+- name: Test appVersion handling when null
+ vars:
+ chart_test: "appversionless-chart"
+ chart_test_version: "0.1.0"
+ chart_test_version_upgrade: "0.1.0"
+ block:
+ - name: Copy test chart
+ copy:
+ src: "{{ chart_test }}"
+ dest: "/tmp/helm_test_appversion/test-null/"
+
+ # create package with appVersion v1
+ - name: "Package chart into archive with appVersion v1"
+ command: "{{ helm_binary }} package --app-version v1 /tmp/helm_test_appversion/test-null/{{ chart_test }}"
+
+ - name: Install Chart from local path
+ include_tasks: "../tests_chart.yml"
+ vars:
+ source: local_path
+ chart_source: "/tmp/helm_test_appversion/test-null/{{ chart_test }}/"
+ chart_source_upgrade: "{{ chart_test }}-{{ chart_test_version }}.tgz"
+
+- name: Remove clone repos
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - /tmp/helm_test_repo
+ - /tmp/helm_test_repo_upgrade
+ - /tmp/helm_test_appversion
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_chart/from_repository.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_chart/from_repository.yml
new file mode 100644
index 00000000..067b216b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_chart/from_repository.yml
@@ -0,0 +1,19 @@
+---
+- name: Add chart repo
+ helm_repository:
+ name: test_helm
+ repo_url: "{{ chart_test_repo }}"
+
+- name: Install Chart from repository
+ include_tasks: "../tests_chart.yml"
+ vars:
+ source: repository
+ chart_source: "test_helm/{{ chart_test }}"
+ chart_source_version: "{{ chart_test_version }}"
+ chart_source_version_upgrade: "{{ chart_test_version_upgrade }}"
+
+- name: Add chart repo
+ helm_repository:
+ name: test_helm
+ repo_url: "{{ chart_test_repo }}"
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_chart/from_url.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_chart/from_url.yml
new file mode 100644
index 00000000..fd3f66c3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_chart/from_url.yml
@@ -0,0 +1,7 @@
+---
+- name: Install Chart from URL
+ include_tasks: "../tests_chart.yml"
+ vars:
+ source: url
+ chart_source: "{{ chart_test_repo }}/{{ chart_test }}-{{ chart_test_version }}.tgz"
+ chart_source_upgrade: "{{ chart_test_repo }}/{{ chart_test }}-{{ chart_test_version_upgrade }}.tgz"
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_helm_plugin.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_helm_plugin.yml
new file mode 100644
index 00000000..720a06d5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_helm_plugin.yml
@@ -0,0 +1,84 @@
+---
+- name: Install env plugin in check mode
+ helm_plugin:
+ binary_path: "{{ helm_binary }}"
+ namespace: "{{ helm_namespace }}"
+ state: present
+ plugin_path: https://github.com/adamreese/helm-env
+ register: check_install_env
+ check_mode: true
+
+- assert:
+ that:
+ - check_install_env.changed
+
+- name: Install env plugin
+ helm_plugin:
+ binary_path: "{{ helm_binary }}"
+ namespace: "{{ helm_namespace }}"
+ state: present
+ plugin_path: https://github.com/adamreese/helm-env
+ register: install_env
+
+- assert:
+ that:
+ - install_env.changed
+
+- name: Gather info about all plugin
+ helm_plugin_info:
+ binary_path: "{{ helm_binary }}"
+ namespace: "{{ helm_namespace }}"
+ register: plugin_info
+
+- assert:
+ that:
+ - plugin_info.plugin_list is defined
+
+- name: Install env plugin again
+ helm_plugin:
+ binary_path: "{{ helm_binary }}"
+ namespace: "{{ helm_namespace }}"
+ state: present
+ plugin_path: https://github.com/adamreese/helm-env
+ register: install_env
+
+- assert:
+ that:
+ - not install_env.changed
+
+- name: Uninstall env plugin in check mode
+ helm_plugin:
+ binary_path: "{{ helm_binary }}"
+ namespace: "{{ helm_namespace }}"
+ state: absent
+ plugin_name: env
+ register: check_uninstall_env
+ check_mode: true
+
+- assert:
+ that:
+ - check_uninstall_env.changed
+
+- name: Uninstall env plugin
+ helm_plugin:
+ binary_path: "{{ helm_binary }}"
+ namespace: "{{ helm_namespace }}"
+ state: absent
+ plugin_name: env
+ register: uninstall_env
+
+- assert:
+ that:
+ - uninstall_env.changed
+
+- name: Uninstall env plugin again
+ helm_plugin:
+ binary_path: "{{ helm_binary }}"
+ namespace: "{{ helm_namespace }}"
+ state: absent
+ plugin_name: env
+ register: uninstall_env
+
+- assert:
+ that:
+ - not uninstall_env.changed
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_repository.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_repository.yml
new file mode 100644
index 00000000..9d274819
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_repository.yml
@@ -0,0 +1,61 @@
+---
+- name: "Ensure test_helm_repo doesn't exist"
+ helm_repository:
+ name: test_helm_repo
+ state: absent
+
+- name: Add test_helm_repo chart repository
+ helm_repository:
+ name: test_helm_repo
+ repo_url: "{{ chart_test_repo }}"
+ register: repository
+
+- name: Assert that test_helm_repo repository is added
+ assert:
+ that:
+ - repository is changed
+
+- name: Check idempotency
+ helm_repository:
+ name: test_helm_repo
+ repo_url: "{{ chart_test_repo }}"
+ register: repository
+
+- name: Assert idempotency
+ assert:
+ that:
+ - repository is not changed
+
+- name: Failed to add repository with the same name
+ helm_repository:
+ name: test_helm_repo
+ repo_url: "https://other-charts.url"
+ register: repository_errors
+ ignore_errors: yes
+
+- name: Assert that adding repository with the same name failed
+ assert:
+ that:
+ - repository_errors is failed
+
+- name: Remove test_helm_repo chart repository
+ helm_repository:
+ name: test_helm_repo
+ state: absent
+ register: repository
+
+- name: Assert that test_helm_repo repository is removed
+ assert:
+ that:
+ - repository is changed
+
+- name: Check idempotency after remove
+ helm_repository:
+ name: test_helm_repo
+ state: absent
+ register: repository
+
+- name: Assert idempotency
+ assert:
+ that:
+ - repository is not changed
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/access_review.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/access_review.yml
new file mode 100644
index 00000000..78d6d567
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/access_review.yml
@@ -0,0 +1,22 @@
+---
+- name: Create a SelfSubjectAccessReview resource
+ register: can_i_create_namespaces
+ ignore_errors: yes
+ k8s:
+ state: present
+ definition:
+ apiVersion: authorization.k8s.io/v1
+ kind: SelfSubjectAccessReview
+ spec:
+ resourceAttributes:
+ group: ""
+ resource: namespaces
+ verb: create
+
+- name: Assert that the SelfSubjectAccessReview request succeeded
+ assert:
+ that:
+ - can_i_create_namespaces is successful
+ - can_i_create_namespaces.result.status is defined
+ - can_i_create_namespaces.result.status.allowed is defined
+ - can_i_create_namespaces.result.status.allowed
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/append_hash.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/append_hash.yml
new file mode 100644
index 00000000..9c726a3d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/append_hash.yml
@@ -0,0 +1,69 @@
+---
+- block:
+ - name: Ensure that append_hash namespace exists
+ k8s:
+ kind: Namespace
+ name: append-hash
+
+ - name: Create k8s_resource variable
+ set_fact:
+ k8s_resource:
+ metadata:
+ name: config-map-test
+ namespace: append-hash
+ apiVersion: v1
+ kind: ConfigMap
+ data:
+ hello: world
+
+ - name: Create config map
+ k8s:
+ definition: "{{ k8s_resource }}"
+ append_hash: yes
+ register: k8s_configmap1
+
+ - name: Check configmap is created with a hash
+ assert:
+ that:
+ - k8s_configmap1 is changed
+ - k8s_configmap1.result.metadata.name != 'config-map-test'
+ - k8s_configmap1.result.metadata.name[:-10] == 'config-map-test-'
+
+ - name: Recreate same config map
+ k8s:
+ definition: "{{ k8s_resource }}"
+ append_hash: yes
+ register: k8s_configmap2
+
+ - name: Check configmap creation with append_hash is idempotent
+ assert:
+ that:
+ - k8s_configmap2 is not changed
+ - k8s_configmap1.result.metadata.name == k8s_configmap2.result.metadata.name
+
+ - name: Add key to config map
+ k8s:
+ definition:
+ metadata:
+ name: config-map-test
+ namespace: append-hash
+ apiVersion: v1
+ kind: ConfigMap
+ data:
+ hello: world
+ another: value
+ append_hash: yes
+ register: k8s_configmap3
+
+ - name: Check configmaps are different
+ assert:
+ that:
+ - k8s_configmap3 is changed
+ - k8s_configmap1.result.metadata.name != k8s_configmap3.result.metadata.name
+
+ always:
+ - name: Ensure that namespace is removed
+ k8s:
+ kind: Namespace
+ name: append-hash
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/apply.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/apply.yml
new file mode 100644
index 00000000..2f579755
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/apply.yml
@@ -0,0 +1,769 @@
+---
+- block:
+ - set_fact:
+ apply_namespace: apply
+
+ - name: Ensure namespace exists
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: "{{ apply_namespace }}"
+
+ - name: Add a configmap
+ k8s:
+ name: "apply-configmap"
+ namespace: "{{ apply_namespace }}"
+ definition:
+ kind: ConfigMap
+ apiVersion: v1
+ data:
+ one: "1"
+ two: "2"
+ three: "3"
+ apply: yes
+ register: k8s_configmap
+
+ - name: Check configmap was created
+ assert:
+ that:
+ - k8s_configmap is changed
+ - k8s_configmap.result.metadata.annotations|default(False)
+
+ - name: Add same configmap again
+ k8s:
+ definition:
+ kind: ConfigMap
+ apiVersion: v1
+ metadata:
+ name: "apply-configmap"
+ namespace: "{{ apply_namespace }}"
+ data:
+ one: "1"
+ two: "2"
+ three: "3"
+ apply: yes
+ register: k8s_configmap_2
+
+ - name: Check nothing changed
+ assert:
+ that:
+ - k8s_configmap_2 is not changed
+
+ - name: Add same configmap again with check mode on
+ k8s:
+ definition:
+ kind: ConfigMap
+ apiVersion: v1
+ metadata:
+ name: "apply-configmap"
+ namespace: "{{ apply_namespace }}"
+ data:
+ one: "1"
+ two: "2"
+ three: "3"
+ apply: yes
+ check_mode: yes
+ register: k8s_configmap_check
+
+ - name: Check nothing changed
+ assert:
+ that:
+ - k8s_configmap_check is not changed
+
+ - name: Add same configmap again but using name and namespace args
+ k8s:
+ name: "apply-configmap"
+ namespace: "{{ apply_namespace }}"
+ definition:
+ kind: ConfigMap
+ apiVersion: v1
+ data:
+ one: "1"
+ two: "2"
+ three: "3"
+ apply: yes
+ register: k8s_configmap_2a
+
+ - name: Check nothing changed
+ assert:
+ that:
+ - k8s_configmap_2a is not changed
+
+ - name: Update configmap
+ k8s:
+ definition:
+ kind: ConfigMap
+ apiVersion: v1
+ metadata:
+ name: "apply-configmap"
+ namespace: "{{ apply_namespace }}"
+ data:
+ one: "1"
+ three: "3"
+ four: "4"
+ apply: yes
+ register: k8s_configmap_3
+
+ - name: Ensure that configmap has been correctly updated
+ assert:
+ that:
+ - k8s_configmap_3 is changed
+ - "'four' in k8s_configmap_3.result.data"
+ - "'two' not in k8s_configmap_3.result.data"
+
+ - name: Add a service
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: apply-svc
+ namespace: "{{ apply_namespace }}"
+ spec:
+ selector:
+ app: whatever
+ ports:
+ - name: http
+ port: 8080
+ targetPort: 8080
+ apply: yes
+ register: k8s_service
+
+ - name: Add exactly same service
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: apply-svc
+ namespace: "{{ apply_namespace }}"
+ spec:
+ selector:
+ app: whatever
+ ports:
+ - name: http
+ port: 8080
+ targetPort: 8080
+ apply: yes
+ register: k8s_service_2
+
+ - name: Check nothing changed
+ assert:
+ that:
+ - k8s_service_2 is not changed
+
+ - name: Add exactly same service in check mode
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: apply-svc
+ namespace: "{{ apply_namespace }}"
+ spec:
+ selector:
+ app: whatever
+ ports:
+ - name: http
+ port: 8080
+ targetPort: 8080
+ apply: yes
+ register: k8s_service_3
+ check_mode: yes
+
+ - name: Check nothing changed
+ assert:
+ that:
+ - k8s_service_3 is not changed
+
+ - name: Change service ports
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: apply-svc
+ namespace: "{{ apply_namespace }}"
+ spec:
+ selector:
+ app: whatever
+ ports:
+ - name: http
+ port: 8081
+ targetPort: 8081
+ apply: yes
+ register: k8s_service_4
+
+ - name: Check ports are correct
+ assert:
+ that:
+ - k8s_service_4 is changed
+ - k8s_service_4.result.spec.ports | length == 1
+ - k8s_service_4.result.spec.ports[0].port == 8081
+
+ - name: Insert new service port
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: apply-svc
+ namespace: "{{ apply_namespace }}"
+ spec:
+ selector:
+ app: whatever
+ ports:
+ - name: mesh
+ port: 8080
+ targetPort: 8080
+ - name: http
+ port: 8081
+ targetPort: 8081
+ apply: yes
+ register: k8s_service_4
+
+ - name: Check ports are correct
+ assert:
+ that:
+ - k8s_service_4 is changed
+ - k8s_service_4.result.spec.ports | length == 2
+ - k8s_service_4.result.spec.ports[0].port == 8080
+ - k8s_service_4.result.spec.ports[1].port == 8081
+
+ - name: Remove new service port (check mode)
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: apply-svc
+ namespace: "{{ apply_namespace }}"
+ spec:
+ selector:
+ app: whatever
+ ports:
+ - name: http
+ port: 8081
+ targetPort: 8081
+ apply: yes
+ check_mode: yes
+ register: k8s_service_check
+
+ - name: Check ports are correct
+ assert:
+ that:
+ - k8s_service_check is changed
+ - k8s_service_check.result.spec.ports | length == 1
+ - k8s_service_check.result.spec.ports[0].port == 8081
+
+ - name: Remove new service port
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: apply-svc
+ namespace: "{{ apply_namespace }}"
+ spec:
+ selector:
+ app: whatever
+ ports:
+ - name: http
+ port: 8081
+ targetPort: 8081
+ apply: yes
+ register: k8s_service_5
+
+ - name: Check ports are correct
+ assert:
+ that:
+ - k8s_service_5 is changed
+ - k8s_service_5.result.spec.ports | length == 1
+ - k8s_service_5.result.spec.ports[0].port == 8081
+
+ - name: Add a serviceaccount
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: apply-deploy
+ namespace: "{{ apply_namespace }}"
+
+ - name: Add a deployment
+ k8s:
+ definition:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: apply-deploy
+ namespace: "{{ apply_namespace }}"
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: "{{ k8s_pod_name }}"
+ template: "{{ k8s_pod_template }}"
+ wait: yes
+ apply: yes
+ vars:
+ k8s_pod_name: apply-deploy
+ k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green
+ k8s_pod_service_account: apply-deploy
+ k8s_pod_ports:
+ - containerPort: 8080
+ name: http
+ protocol: TCP
+ k8s_pod_resources:
+ requests:
+ cpu: 100m
+ memory: 100Mi
+ limits:
+ cpu: 100m
+ memory: 100Mi
+
+ - name: Update the earlier deployment in check mode
+ k8s:
+ definition:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: apply-deploy
+ namespace: "{{ apply_namespace }}"
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: "{{ k8s_pod_name }}"
+ template: "{{ k8s_pod_template }}"
+ wait: yes
+ apply: yes
+ check_mode: yes
+ vars:
+ k8s_pod_name: apply-deploy
+ k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-purple
+ k8s_pod_service_account: apply-deploy
+ k8s_pod_ports:
+ - containerPort: 8080
+ name: http
+ protocol: TCP
+ k8s_pod_resources:
+ requests:
+ cpu: 50m
+ limits:
+ cpu: 50m
+ memory: 50Mi
+ register: update_deploy_check_mode
+
+ - name: Ensure check mode change took
+ assert:
+ that:
+ - update_deploy_check_mode is changed
+ - "update_deploy_check_mode.result.spec.template.spec.containers[0].image == 'gcr.io/kuar-demo/kuard-amd64:v0.10.0-purple'"
+
+ - name: Update the earlier deployment
+ k8s:
+ definition:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: apply-deploy
+ namespace: "{{ apply_namespace }}"
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: "{{ k8s_pod_name }}"
+ template: "{{ k8s_pod_template }}"
+ wait: yes
+ apply: yes
+ vars:
+ k8s_pod_name: apply-deploy
+ k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-purple
+ k8s_pod_service_account: apply-deploy
+ k8s_pod_ports:
+ - containerPort: 8080
+ name: http
+ protocol: TCP
+ k8s_pod_resources:
+ requests:
+ cpu: 50m
+ limits:
+ cpu: 50m
+ memory: 50Mi
+ register: update_deploy_for_real
+
+ - name: Ensure change took
+ assert:
+ that:
+ - update_deploy_for_real is changed
+ - "update_deploy_for_real.result.spec.template.spec.containers[0].image == 'gcr.io/kuar-demo/kuard-amd64:v0.10.0-purple'"
+
+ - name: Remove the serviceaccount
+ k8s:
+ state: absent
+ definition:
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: apply-deploy
+ namespace: "{{ apply_namespace }}"
+
+ - name: Apply deployment after service account removed
+ k8s:
+ definition:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: apply-deploy
+ namespace: "{{ apply_namespace }}"
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: "{{ k8s_pod_name }}"
+ template: "{{ k8s_pod_template }}"
+ wait: yes
+ apply: yes
+ vars:
+ k8s_pod_name: apply-deploy
+ k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green
+ k8s_pod_service_account: apply-deploy
+ k8s_pod_ports:
+ - containerPort: 8080
+ name: http
+ protocol: TCP
+ k8s_pod_resources:
+ requests:
+ cpu: 50m
+ limits:
+ cpu: 50m
+ memory: 50Mi
+ register: deploy_after_serviceaccount_removal
+ ignore_errors: yes
+
+ - name: Ensure that updating deployment after service account removal failed
+ assert:
+ that:
+ - deploy_after_serviceaccount_removal is failed
+
+ - name: Insert new service port
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: apply-svc
+ namespace: "{{ apply_namespace }}"
+ spec:
+ selector:
+ app: whatever
+ ports:
+ - name: mesh
+ port: 8080
+ targetPort: 8080
+ - name: http
+ port: 8081
+ targetPort: 8081
+ apply: yes
+ register: k8s_service_4
+
+ - name: Check ports are correct
+ assert:
+ that:
+ - k8s_service_4 is changed
+ - k8s_service_4.result.spec.ports | length == 2
+ - k8s_service_4.result.spec.ports[0].port == 8080
+ - k8s_service_4.result.spec.ports[1].port == 8081
+
+ - name: Remove new service port (check mode)
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: apply-svc
+ namespace: "{{ apply_namespace }}"
+ spec:
+ selector:
+ app: whatever
+ ports:
+ - name: http
+ port: 8081
+ targetPort: 8081
+ apply: yes
+ check_mode: yes
+ register: k8s_service_check
+
+ - name: Check ports are correct
+ assert:
+ that:
+ - k8s_service_check is changed
+ - k8s_service_check.result.spec.ports | length == 1
+ - k8s_service_check.result.spec.ports[0].port == 8081
+
+ - name: Remove new service port
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: apply-svc
+ namespace: "{{ apply_namespace }}"
+ spec:
+ selector:
+ app: whatever
+ ports:
+ - name: http
+ port: 8081
+ targetPort: 8081
+ apply: yes
+ register: k8s_service_5
+
+ - name: Check ports are correct
+ assert:
+ that:
+ - k8s_service_5 is changed
+ - k8s_service_5.result.spec.ports | length == 1
+ - k8s_service_5.result.spec.ports[0].port == 8081
+
+ - name: Add a serviceaccount
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: apply-deploy
+ namespace: "{{ apply_namespace }}"
+
+ - name: Add a deployment
+ k8s:
+ definition:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: apply-deploy
+ namespace: "{{ apply_namespace }}"
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: "{{ k8s_pod_name }}"
+ template: "{{ k8s_pod_template }}"
+ wait: yes
+ apply: yes
+ vars:
+ k8s_pod_name: apply-deploy
+ k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green
+ k8s_pod_service_account: apply-deploy
+ k8s_pod_ports:
+ - containerPort: 8080
+ name: http
+ protocol: TCP
+
+ - name: Remove the serviceaccount
+ k8s:
+ state: absent
+ definition:
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: apply-deploy
+ namespace: "{{ apply_namespace }}"
+
+ - name: Update the earlier deployment
+ k8s:
+ definition:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: apply-deploy
+ namespace: "{{ apply_namespace }}"
+ spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ app: "{{ k8s_pod_name }}"
+ template: "{{ k8s_pod_template }}"
+ wait: yes
+ apply: yes
+ vars:
+ k8s_pod_name: apply-deploy
+ k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-purple
+ k8s_pod_service_account: apply-deploy
+ k8s_pod_ports:
+ - containerPort: 8080
+ name: http
+ protocol: TCP
+ register: deploy_after_serviceaccount_removal
+ ignore_errors: yes
+
+ - name: Ensure that updating deployment after service account removal failed
+ assert:
+ that:
+ - deploy_after_serviceaccount_removal is failed
+
+ - name: Insert new service port
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: apply-svc
+ namespace: "{{ apply_namespace }}"
+ spec:
+ selector:
+ app: whatever
+ ports:
+ - name: mesh
+ port: 8080
+ targetPort: 8080
+ - name: http
+ port: 8081
+ targetPort: 8081
+ apply: yes
+ register: k8s_service_4
+
+ - name: Check ports are correct
+ assert:
+ that:
+ - k8s_service_4 is changed
+ - k8s_service_4.result.spec.ports | length == 2
+ - k8s_service_4.result.spec.ports[0].port == 8080
+ - k8s_service_4.result.spec.ports[1].port == 8081
+
+ - name: Remove new service port (check mode)
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: apply-svc
+ namespace: "{{ apply_namespace }}"
+ spec:
+ selector:
+ app: whatever
+ ports:
+ - name: http
+ port: 8081
+ targetPort: 8081
+ apply: yes
+ check_mode: yes
+ register: k8s_service_check
+
+ - name: Check ports are correct
+ assert:
+ that:
+ - k8s_service_check is changed
+ - k8s_service_check.result.spec.ports | length == 1
+ - k8s_service_check.result.spec.ports[0].port == 8081
+
+ - name: Remove new service port
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: apply-svc
+ namespace: "{{ apply_namespace }}"
+ spec:
+ selector:
+ app: whatever
+ ports:
+ - name: http
+ port: 8081
+ targetPort: 8081
+ apply: yes
+ register: k8s_service_5
+
+ - name: Check ports are correct
+ assert:
+ that:
+ - k8s_service_5 is changed
+ - k8s_service_5.result.spec.ports | length == 1
+ - k8s_service_5.result.spec.ports[0].port == 8081
+
+ - name: Add a serviceaccount
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: apply-deploy
+ namespace: "{{ apply_namespace }}"
+
+ - name: Add a deployment
+ k8s:
+ definition:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: apply-deploy
+ namespace: "{{ apply_namespace }}"
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: "{{ k8s_pod_name }}"
+ template: "{{ k8s_pod_template }}"
+ wait: yes
+ apply: yes
+ vars:
+ k8s_pod_name: apply-deploy
+ k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green
+ k8s_pod_service_account: apply-deploy
+ k8s_pod_ports:
+ - containerPort: 8080
+ name: http
+ protocol: TCP
+
+ - name: Remove the serviceaccount
+ k8s:
+ state: absent
+ definition:
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: apply-deploy
+ namespace: "{{ apply_namespace }}"
+
+ - name: Update the earlier deployment
+ k8s:
+ definition:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: apply-deploy
+ namespace: "{{ apply_namespace }}"
+ spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ app: "{{ k8s_pod_name }}"
+ template: "{{ k8s_pod_template }}"
+ wait: yes
+ apply: yes
+ vars:
+ k8s_pod_name: apply-deploy
+ k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-purple
+ k8s_pod_service_account: apply-deploy
+ k8s_pod_ports:
+ - containerPort: 8080
+ name: http
+ protocol: TCP
+ register: deploy_after_serviceaccount_removal
+ ignore_errors: yes
+
+ - name: Ensure that updating deployment after service account removal failed
+ assert:
+ that:
+ - deploy_after_serviceaccount_removal is failed
+
+ always:
+ - name: Remove namespace
+ k8s:
+ kind: Namespace
+ name: "{{ apply_namespace }}"
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/cluster_info.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/cluster_info.yml
new file mode 100644
index 00000000..644de153
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/cluster_info.yml
@@ -0,0 +1,22 @@
+---
+- name: Get Information about All APIs
+ k8s_cluster_info:
+ register: api_details
+
+- name: Print all APIs for debugging
+ debug:
+ msg: "{{ api_details.apis }}"
+
+- name: Get core API version
+ set_fact:
+ crd: "{{ api_details.apis['apiextensions.k8s.io'] }}"
+ host: "{{ api_details.connection['host'] }}"
+ client_version: "{{ api_details.version['client'] }}"
+
+- name: Check if all APIs are present
+ assert:
+ that:
+ - api_details.apis is defined
+ - crd is defined
+ - host is defined
+ - client_version is defined
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/crd.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/crd.yml
new file mode 100644
index 00000000..9b1f5e89
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/crd.yml
@@ -0,0 +1,66 @@
+---
+- block:
+ - name: Create a namespace
+ k8s:
+ name: crd
+ kind: Namespace
+
+ - name: Install custom resource definitions
+ k8s:
+ definition: "{{ lookup('file', kubernetes_role_path + '/files/setup-crd.yml') }}"
+
+ - name: Pause 5 seconds to avoid race condition
+ pause:
+ seconds: 5
+
+ - name: Create custom resource definition
+ k8s:
+ definition: "{{ lookup('file', kubernetes_role_path + '/files/crd-resource.yml') }}"
+ namespace: crd
+ apply: "{{ create_crd_with_apply | default(omit) }}"
+ register: create_crd
+
+ - name: Patch custom resource definition
+ k8s:
+ definition: "{{ lookup('file', kubernetes_role_path + '/files/crd-resource.yml') }}"
+ namespace: crd
+ register: recreate_crd
+ ignore_errors: yes
+
+ - name: Assert that recreating crd is as expected
+ assert:
+ that:
+ - recreate_crd is not failed
+
+ - block:
+ - name: Recreate custom resource definition with merge_type
+ k8s:
+ definition: "{{ lookup('file', kubernetes_role_path + '/files/crd-resource.yml') }}"
+ merge_type: merge
+ namespace: crd
+ register: recreate_crd_with_merge
+
+ - name: Recreate custom resource definition with merge_type list
+ k8s:
+ definition: "{{ lookup('file', kubernetes_role_path + '/files/crd-resource.yml') }}"
+ merge_type:
+ - strategic-merge
+ - merge
+ namespace: crd
+ register: recreate_crd_with_merge_list
+ when: recreate_crd is successful
+
+
+ - name: Remove crd
+ k8s:
+ definition: "{{ lookup('file', kubernetes_role_path + '/files/crd-resource.yml') }}"
+ namespace: crd
+ state: absent
+
+ always:
+ - name: Remove crd namespace
+ k8s:
+ kind: Namespace
+ name: crd
+ state: absent
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/delete.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/delete.yml
new file mode 100644
index 00000000..e49ff221
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/delete.yml
@@ -0,0 +1,95 @@
+---
+- block:
+ - set_fact:
+ delete_namespace: delete
+
+ - name: Ensure namespace exists
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: "{{ delete_namespace }}"
+
+ - name: Add a daemonset
+ k8s:
+ definition:
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ name: delete-daemonset
+ namespace: "{{ delete_namespace }}"
+ spec:
+ selector:
+ matchLabels:
+ app: "{{ k8s_pod_name }}"
+ template: "{{ k8s_pod_template }}"
+ wait: yes
+ wait_timeout: 180
+ vars:
+ k8s_pod_name: delete-ds
+ k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:1
+ register: ds
+
+ - name: Check that daemonset wait worked
+ assert:
+ that:
+ - ds.result.status.currentNumberScheduled == ds.result.status.desiredNumberScheduled
+
+ - name: Check if pods exist
+ k8s_info:
+ namespace: "{{ delete_namespace }}"
+ kind: Pod
+ label_selectors:
+ - "app={{ k8s_pod_name }}"
+ vars:
+ k8s_pod_name: delete-ds
+ register: pods_create
+
+ - name: Assert that there are pods
+ assert:
+ that:
+ - pods_create.resources
+
+ - name: Remove the daemonset
+ k8s:
+ kind: DaemonSet
+ name: delete-daemonset
+ namespace: "{{ delete_namespace }}"
+ state: absent
+ wait: yes
+
+ - name: Show status of pods
+ k8s_info:
+ namespace: "{{ delete_namespace }}"
+ kind: Pod
+ label_selectors:
+ - "app={{ k8s_pod_name }}"
+ vars:
+ k8s_pod_name: delete-ds
+
+ - name: Wait for background deletion
+ pause:
+ seconds: 30
+
+ - name: Check if pods still exist
+ k8s_info:
+ namespace: "{{ delete_namespace }}"
+ kind: Pod
+ label_selectors:
+ - "app={{ k8s_pod_name }}"
+ vars:
+ k8s_pod_name: delete-ds
+ register: pods_delete
+
+ - name: Assert that deleting the daemonset deleted the pods
+ assert:
+ that:
+ - not pods_delete.resources
+
+ always:
+ - name: Remove namespace
+ k8s:
+ kind: Namespace
+ name: "{{ delete_namespace }}"
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/exec.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/exec.yml
new file mode 100644
index 00000000..5397ab95
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/exec.yml
@@ -0,0 +1,64 @@
+---
+- vars:
+ exec_namespace: k8s-exec
+ pod: sleep-pod
+ exec_pod_definition:
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ name: "{{ pod }}"
+ namespace: "{{ exec_namespace }}"
+ spec:
+ containers:
+ - name: sleeper
+ image: busybox
+ command: ["sleep", "infinity"]
+
+ block:
+ - name: "Ensure that {{ exec_namespace }} namespace exists"
+ k8s:
+ kind: Namespace
+ name: "{{ exec_namespace }}"
+
+ - name: "Create a pod"
+ k8s:
+ definition: "{{ exec_pod_definition }}"
+ wait: yes
+ wait_sleep: 1
+ wait_timeout: 30
+
+ - name: "Execute a command"
+ k8s_exec:
+ pod: "{{ pod }}"
+ namespace: "{{ exec_namespace }}"
+ command: cat /etc/resolv.conf
+ register: output
+
+ - name: "Show k8s_exec output"
+ debug:
+ var: output
+
+ - name: "Assert k8s_exec output is correct"
+ assert:
+ that:
+ - "'nameserver' in output.stdout"
+
+ - name: Check if rc is returned for the given command
+ k8s_exec:
+ namespace: "{{ exec_namespace }}"
+ pod: "{{ pod }}"
+ command: 'false'
+ register: command_status
+ ignore_errors: True
+
+ - name: Check last command status
+ assert:
+ that:
+ - command_status.return_code != 0
+
+ always:
+ - name: "Cleanup namespace"
+ k8s:
+ kind: Namespace
+ name: "{{ exec_namespace }}"
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/full.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/full.yml
new file mode 100644
index 00000000..d2666797
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/full.yml
@@ -0,0 +1,373 @@
+---
+- block:
+ - name: Create a namespace
+ k8s:
+ name: testing
+ kind: Namespace
+ register: output
+
+ - name: Show output
+ debug:
+ var: output
+
+ # TODO: See https://github.com/ansible-collections/community.kubernetes/issues/24
+ # - name: Setting validate_certs to true causes a failure
+ # k8s:
+ # name: testing
+ # kind: Namespace
+ # validate_certs: yes
+ # ignore_errors: yes
+ # register: output
+ #
+ # - name: assert that validate_certs caused a failure (and therefore was correctly translated to verify_ssl)
+ # assert:
+ # that:
+ # - output is failed
+
+ - name: Ensure k8s_info works with empty resources
+ k8s_info:
+ kind: Deployment
+ namespace: testing
+ api_version: apps/v1
+ register: k8s_info
+
+ - name: Assert that k8s_info is in correct format
+ assert:
+ that:
+ - "'resources' in k8s_info"
+ - not k8s_info.resources
+
+ - name: Create a service
+ k8s:
+ state: present
+ resource_definition: &svc
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: web
+ namespace: testing
+ labels:
+ app: galaxy
+ service: web
+ spec:
+ selector:
+ app: galaxy
+ service: web
+ ports:
+ - protocol: TCP
+ targetPort: 8000
+ name: port-8000-tcp
+ port: 8000
+ register: output
+
+ - name: Show output
+ debug:
+ var: output
+
+ - name: Create the service again
+ k8s:
+ state: present
+ resource_definition: *svc
+ register: output
+
+ - name: Service creation should be idempotent
+ assert:
+ that: not output.changed
+
+ - name: Create a ConfigMap
+ k8s:
+ kind: ConfigMap
+ name: test-force-update
+ namespace: testing
+ definition:
+ data:
+ key: value
+
+ - name: Force update ConfigMap
+ k8s:
+ kind: ConfigMap
+ name: test-force-update
+ namespace: testing
+ definition:
+ data:
+ key: newvalue
+ force: yes
+
+ - name: Create PVC
+ k8s:
+ state: present
+ inline: &pvc
+ apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: elastic-volume
+ namespace: testing
+ spec:
+ resources:
+ requests:
+ storage: 5Gi
+ accessModes:
+ - ReadWriteOnce
+
+ - name: Show output
+ debug:
+ var: output
+
+ - name: Create the PVC again
+ k8s:
+ state: present
+ inline: *pvc
+ register: output
+ - name: Ensure PVC creation is idempotent
+ assert:
+ that: not output.changed
+
+ - name: Create deployment
+ k8s:
+ state: present
+ inline: &deployment
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: elastic
+ labels:
+ app: galaxy
+ service: elastic
+ namespace: testing
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: galaxy
+ service: elastic
+ template:
+ metadata:
+ labels:
+ app: galaxy
+ service: elastic
+ spec:
+ containers:
+ - name: elastic
+ volumeMounts:
+ - mountPath: /usr/share/elasticsearch/data
+ name: elastic-volume
+ command: ['elasticsearch']
+ image: 'ansible/galaxy-elasticsearch:2.4.6'
+ volumes:
+ - name: elastic-volume
+ persistentVolumeClaim:
+ claimName: elastic-volume
+ strategy:
+ type: RollingUpdate
+ register: output
+
+ - name: Show output
+ debug:
+ var: output
+
+ - name: Create deployment again
+ k8s:
+ state: present
+ inline: *deployment
+ register: output
+
+ - name: Ensure Deployment creation is idempotent
+ assert:
+ that: not output.changed
+
+ ### Type tests
+ - name: Create a namespace from a string
+ k8s:
+ definition: |+
+ ---
+ kind: Namespace
+ apiVersion: v1
+ metadata:
+ name: testing1
+
+ ### https://github.com/ansible-collections/community.kubernetes/issues/111
+ - set_fact:
+ api_groups: "{{ lookup('k8s', cluster_info='api_groups') }}"
+
+ - debug:
+ var: api_groups
+
+ - name: Namespace should exist
+ k8s_info:
+ kind: Namespace
+ api_version: v1
+ name: testing1
+ register: k8s_info_testing1
+ failed_when: not k8s_info_testing1.resources or k8s_info_testing1.resources[0].status.phase != "Active"
+
+ - name: Create resources from a multidocument yaml string
+ k8s:
+ definition: |+
+ ---
+ kind: Namespace
+ apiVersion: v1
+ metadata:
+ name: testing2
+ ---
+ kind: Namespace
+ apiVersion: v1
+ metadata:
+ name: testing3
+
+ - name: Lookup namespaces
+ k8s_info:
+ api_version: v1
+ kind: Namespace
+ name: "{{ item }}"
+ loop:
+ - testing2
+ - testing3
+ register: k8s_namespaces
+
+ - name: Resources should exist
+ assert:
+ that: item.resources[0].status.phase == 'Active'
+ loop: "{{ k8s_namespaces.results }}"
+
+ - name: Delete resources from a multidocument yaml string
+ k8s:
+ state: absent
+ definition: |+
+ ---
+ kind: Namespace
+ apiVersion: v1
+ metadata:
+ name: testing2
+ ---
+ kind: Namespace
+ apiVersion: v1
+ metadata:
+ name: testing3
+
+ - name: Lookup namespaces
+ k8s_info:
+ api_version: v1
+ kind: Namespace
+ name: "{{ item }}"
+ loop:
+ - testing2
+ - testing3
+ register: k8s_namespaces
+
+ - name: Resources should not exist
+ assert:
+ that:
+ - not item.resources or item.resources[0].status.phase == "Terminating"
+ loop: "{{ k8s_namespaces.results }}"
+
+ - name: Create resources from a list
+ k8s:
+ definition:
+ - kind: Namespace
+ apiVersion: v1
+ metadata:
+ name: testing4
+ - kind: Namespace
+ apiVersion: v1
+ metadata:
+ name: testing5
+
+ - name: Lookup namespaces
+ k8s_info:
+ api_version: v1
+ kind: Namespace
+ name: "{{ item }}"
+ loop:
+ - testing4
+ - testing5
+ register: k8s_namespaces
+
+ - name: Resources should exist
+ assert:
+ that: item.resources[0].status.phase == 'Active'
+ loop: "{{ k8s_namespaces.results }}"
+
+ - name: Delete resources from a list
+ k8s:
+ state: absent
+ definition:
+ - kind: Namespace
+ apiVersion: v1
+ metadata:
+ name: testing4
+ - kind: Namespace
+ apiVersion: v1
+ metadata:
+ name: testing5
+
+ - name: Get info about terminating resources
+ k8s_info:
+ api_version: v1
+ kind: Namespace
+ name: "{{ item }}"
+ loop:
+ - testing4
+ - testing5
+ register: k8s_info
+
+ - name: Ensure resources are terminating if still in results
+ assert:
+ that: not item.resources or item.resources[0].status.phase == "Terminating"
+ loop: "{{ k8s_info.results }}"
+
+ - name: Create resources from a yaml string ending with ---
+ k8s:
+ definition: |+
+ ---
+ kind: Namespace
+ apiVersion: v1
+ metadata:
+ name: testing6
+ ---
+
+ - name: Namespace should exist
+ k8s_info:
+ kind: Namespace
+ api_version: v1
+ name: testing6
+ register: k8s_info_testing6
+ failed_when: not k8s_info_testing6.resources or k8s_info_testing6.resources[0].status.phase != "Active"
+
+ - include_tasks: crd.yml
+ - include_tasks: lists.yml
+ - include_tasks: append_hash.yml
+
+ always:
+ - name: Delete all namespaces
+ k8s:
+ state: absent
+ definition:
+ - kind: Namespace
+ apiVersion: v1
+ metadata:
+ name: testing
+ - kind: Namespace
+ apiVersion: v1
+ metadata:
+ name: testing1
+ - kind: Namespace
+ apiVersion: v1
+ metadata:
+ name: testing2
+ - kind: Namespace
+ apiVersion: v1
+ metadata:
+ name: testing3
+ - kind: Namespace
+ apiVersion: v1
+ metadata:
+ name: testing4
+ - kind: Namespace
+ apiVersion: v1
+ metadata:
+ name: testing5
+ - kind: Namespace
+ apiVersion: v1
+ metadata:
+ name: testing6
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/info.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/info.yml
new file mode 100644
index 00000000..2b7fedaf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/info.yml
@@ -0,0 +1,167 @@
+---
+- block:
+ - set_fact:
+ wait_namespace: wait
+ k8s_pod_name: pod-info-1
+ multi_pod_one: multi-pod-1
+ multi_pod_two: multi-pod-2
+
+ - name: Ensure namespace exists
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: "{{ wait_namespace }}"
+
+ - name: Add a simple pod with initContainer
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ name: "{{ k8s_pod_name }}"
+ namespace: "{{ wait_namespace }}"
+ spec:
+ initContainers:
+ - name: init-01
+ image: python:3.7-alpine
+ command: ['sh', '-c', 'sleep 20']
+ containers:
+ - name: utilitypod-01
+ image: python:3.7-alpine
+ command: ['sh', '-c', 'sleep 360']
+
+ - name: Wait and gather information about new pod
+ k8s_info:
+ name: "{{ k8s_pod_name }}"
+ kind: Pod
+ namespace: "{{ wait_namespace }}"
+ wait: yes
+ wait_sleep: 5
+ wait_timeout: 400
+ register: wait_info
+
+ - name: Assert that pod creation succeeded
+ assert:
+ that:
+ - wait_info is successful
+ - not wait_info.changed
+ - wait_info.resources[0].status.phase == "Running"
+
+ - name: Remove Pod
+ k8s:
+ api_version: v1
+ kind: Pod
+ name: "{{ k8s_pod_name }}"
+ namespace: "{{ wait_namespace }}"
+ state: absent
+ wait: yes
+ ignore_errors: yes
+ register: short_wait_remove_pod
+
+ - name: Check if pod is removed
+ assert:
+ that:
+ - short_wait_remove_pod is successful
+ - short_wait_remove_pod.changed
+
+ - name: Create multiple pod with initContainer
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ labels:
+ run: multi-box
+ name: "{{ multi_pod_one }}"
+ namespace: "{{ wait_namespace }}"
+ spec:
+ initContainers:
+ - name: init-01
+ image: python:3.7-alpine
+ command: ['sh', '-c', 'sleep 25']
+ containers:
+ - name: multi-pod-01
+ image: python:3.7-alpine
+ command: ['sh', '-c', 'sleep 360']
+
+ - name: Create another pod with same label as previous pod
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ labels:
+ run: multi-box
+ name: "{{ multi_pod_two }}"
+ namespace: "{{ wait_namespace }}"
+ spec:
+ initContainers:
+ - name: init-02
+ image: python:3.7-alpine
+ command: ['sh', '-c', 'sleep 25']
+ containers:
+ - name: multi-pod-02
+ image: python:3.7-alpine
+ command: ['sh', '-c', 'sleep 360']
+
+ - name: Wait and gather information about new pods
+ k8s_info:
+ kind: Pod
+ namespace: "{{ wait_namespace }}"
+ wait: yes
+ wait_sleep: 5
+ wait_timeout: 400
+ label_selectors:
+ - run == multi-box
+ register: wait_info
+
+ - name: Assert that pod creation succeeded
+ assert:
+ that:
+ - wait_info is successful
+ - not wait_info.changed
+ - wait_info.resources[0].status.phase == "Running"
+ - wait_info.resources[1].status.phase == "Running"
+
+ - name: "Remove Pod {{ multi_pod_one }}"
+ k8s:
+ api_version: v1
+ kind: Pod
+ name: "{{ multi_pod_one }}"
+ namespace: "{{ wait_namespace }}"
+ state: absent
+ wait: yes
+ ignore_errors: yes
+ register: multi_pod_one_remove
+
+ - name: "Check if {{ multi_pod_one }} pod is removed"
+ assert:
+ that:
+ - multi_pod_one_remove is successful
+ - multi_pod_one_remove.changed
+
+ - name: "Remove Pod {{ multi_pod_two }}"
+ k8s:
+ api_version: v1
+ kind: Pod
+ name: "{{ multi_pod_two }}"
+ namespace: "{{ wait_namespace }}"
+ state: absent
+ wait: yes
+ ignore_errors: yes
+ register: multi_pod_two_remove
+
+ - name: "Check if {{ multi_pod_two }} pod is removed"
+ assert:
+ that:
+ - multi_pod_two_remove is successful
+ - multi_pod_two_remove.changed
+
+ always:
+ - name: Remove namespace
+ k8s:
+ kind: Namespace
+ name: "{{ wait_namespace }}"
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/lists.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/lists.yml
new file mode 100644
index 00000000..9538d011
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/lists.yml
@@ -0,0 +1,139 @@
+---
+- name: Ensure testing1 namespace exists
+ k8s:
+ api_version: v1
+ kind: Namespace
+ name: testing1
+
+- block:
+ - name: Create configmaps
+ k8s:
+ namespace: testing1
+ definition:
+ apiVersion: v1
+ kind: ConfigMapList
+ items: '{{ configmaps }}'
+
+ - name: Get ConfigMaps
+ k8s_info:
+ api_version: v1
+ kind: ConfigMap
+ namespace: testing1
+ label_selectors:
+ - app=test
+ register: cms
+
+ - name: All three configmaps should exist
+ assert:
+ that: item.data.a is defined
+ with_items: '{{ cms.resources }}'
+
+ - name: Delete configmaps
+ k8s:
+ state: absent
+ namespace: testing1
+ definition:
+ apiVersion: v1
+ kind: ConfigMapList
+ items: '{{ configmaps }}'
+
+ - name: Get ConfigMaps
+ k8s_info:
+ api_version: v1
+ kind: ConfigMap
+ namespace: testing1
+ label_selectors:
+ - app=test
+ register: cms
+
+ - name: All three configmaps should not exist
+ assert:
+ that: not cms.resources
+ vars:
+ configmaps:
+ - metadata:
+ name: list-example-1
+ labels:
+ app: test
+ data:
+ a: first
+ - metadata:
+ name: list-example-2
+ labels:
+ app: test
+ data:
+ a: second
+ - metadata:
+ name: list-example-3
+ labels:
+ app: test
+ data:
+ a: third
+
+- block:
+ - name: Create list of arbitrary resources
+ k8s:
+ namespace: testing1
+ definition:
+ apiVersion: v1
+ kind: List
+ namespace: testing1
+ items: '{{ resources }}'
+
+ - name: Get the created resources
+ k8s_info:
+ api_version: '{{ item.apiVersion }}'
+ kind: '{{ item.kind }}'
+ namespace: testing1
+ name: '{{ item.metadata.name }}'
+ register: list_resources
+ with_items: '{{ resources }}'
+
+ - name: All resources should exist
+ assert:
+ that: ((list_resources.results | sum(attribute="resources", start=[])) | length) == (resources | length)
+
+ - name: Delete list of arbitrary resources
+ k8s:
+ state: absent
+ namespace: testing1
+ definition:
+ apiVersion: v1
+ kind: List
+ namespace: testing1
+ items: '{{ resources }}'
+
+ - name: Get the resources
+ k8s_info:
+ api_version: '{{ item.apiVersion }}'
+ kind: '{{ item.kind }}'
+ namespace: testing1
+ name: '{{ item.metadata.name }}'
+ register: list_resources
+ with_items: '{{ resources }}'
+
+ - name: The resources should not exist
+ assert:
+ that: not ((list_resources.results | sum(attribute="resources", start=[])) | length)
+ vars:
+ resources:
+ - apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: list-example-4
+ data:
+ key: value
+ - apiVersion: v1
+ kind: Service
+ metadata:
+ name: list-example-svc
+ labels:
+ app: test
+ spec:
+ selector:
+ app: test
+ ports:
+ - protocol: TCP
+ targetPort: 8000
+ name: port-8000-tcp
+ port: 8000
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/log.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/log.yml
new file mode 100644
index 00000000..d3da05d3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/log.yml
@@ -0,0 +1,124 @@
+---
+- block:
+ - name: ensure that k8s-log namespace exists
+ k8s:
+ kind: Namespace
+ name: k8s-log
+
+ - name: create hello-world deployment
+ k8s:
+ wait: yes
+ definition:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: hello-world
+ namespace: k8s-log
+ spec:
+ selector:
+ matchLabels:
+ app: hello-world
+ template:
+ metadata:
+ labels:
+ app: hello-world
+ spec:
+ containers:
+ - image: busybox
+ name: hello-world
+ command: ['sh']
+ args: ['-c', 'while true ; do echo "hello world" && sleep 10 ; done']
+ restartPolicy: Always
+
+ - name: retrieve the log by providing the deployment
+ k8s_log:
+ api_version: apps/v1
+ kind: Deployment
+ namespace: k8s-log
+ name: hello-world
+ register: deployment_log
+
+ - name: verify that the log can be retrieved via the deployment
+ assert:
+ that:
+ - "'hello world' in deployment_log.log"
+ - item == 'hello world' or item == ''
+ with_items: '{{ deployment_log.log_lines }}'
+
+ - name: retrieve the log with a label selector
+ k8s_log:
+ namespace: k8s-log
+ label_selectors:
+ - 'app=hello-world'
+ register: label_selector_log
+
+ - name: verify that the log can be retrieved via the label
+ assert:
+ that:
+ - "'hello world' in label_selector_log.log"
+ - item == 'hello world' or item == ''
+ with_items: '{{ label_selector_log.log_lines }}'
+
+ - name: get the hello-world pod
+ k8s_info:
+ kind: Pod
+ namespace: k8s-log
+ label_selectors:
+ - 'app=hello-world'
+ register: k8s_log_pods
+
+ - name: retrieve the log directly with the pod name
+ k8s_log:
+ namespace: k8s-log
+ name: '{{ k8s_log_pods.resources.0.metadata.name }}'
+ register: pod_log
+
+ - name: verify that the log can be retrieved via the pod name
+ assert:
+ that:
+ - "'hello world' in pod_log.log"
+ - item == 'hello world' or item == ''
+ with_items: '{{ pod_log.log_lines }}'
+
+ - name: Create a job that calculates 7
+ k8s:
+ state: present
+ wait: yes
+ wait_timeout: 120
+ wait_condition:
+ type: Complete
+ status: 'True'
+ definition:
+ apiVersion: batch/v1
+ kind: Job
+ metadata:
+ name: int-log
+ namespace: k8s-log
+ spec:
+ template:
+ spec:
+ containers:
+ - name: busybox
+ image: busybox
+ command: ["echo", "7"]
+ restartPolicy: Never
+ backoffLimit: 4
+
+ - name: retrieve logs from the job
+ k8s_log:
+ api_version: batch/v1
+ kind: Job
+ namespace: k8s-log
+ name: int-log
+ register: job_logs
+
+ - name: verify the log was successfully retrieved
+ assert:
+ that: job_logs.log_lines[0] == "7"
+
+ always:
+ - name: ensure that namespace is removed
+ k8s:
+ kind: Namespace
+ name: k8s-log
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/rollback.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/rollback.yml
new file mode 100644
index 00000000..743ff53c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/rollback.yml
@@ -0,0 +1,217 @@
+---
+- block:
+ - name: Set variables
+ set_fact:
+ namespace: "testingrollback"
+
+ - name: Create a namespace
+ k8s:
+ name: "{{ namespace }}"
+ kind: Namespace
+ api_version: v1
+ apply: no
+ register: output
+
+ - name: show output
+ debug:
+ var: output
+
+ - name: Create a deployment
+ k8s:
+ state: present
+ wait: yes
+ inline: &deploy
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: nginx-deploy
+ labels:
+ app: nginx
+ namespace: "{{ namespace }}"
+ spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ app: nginx
+ template:
+ metadata:
+ labels:
+ app: nginx
+ spec:
+ containers:
+ - name: nginx
+ image: nginx:1.17
+ ports:
+ - containerPort: 80
+ register: output
+
+ - name: Show output
+ debug:
+ var: output
+
+ - name: Crash the existing deployment
+ k8s:
+ state: present
+ wait: yes
+ definition:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: nginx-deploy
+ labels:
+ app: nginx
+ namespace: "{{ namespace }}"
+ spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ app: nginx
+ template:
+ metadata:
+ labels:
+ app: nginx
+ spec:
+ containers:
+ - name: nginx
+ image: nginx:1.0.23449928384992872784
+ ports:
+ - containerPort: 80
+ ignore_errors: yes
+ register: output
+
+ - name: Rolling Back the crashed deployment
+ k8s_rollback:
+ api_version: apps/v1
+ kind: Deployment
+ name: nginx-deploy
+ namespace: "{{ namespace }}"
+ when: output.failed
+ register: output
+
+ - name: Show output
+ debug:
+ var: output
+
+ - name: Create a DaemonSet
+ k8s:
+ state: present
+ wait: yes
+ definition:
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ name: fluentd-elasticsearch
+ namespace: "{{ namespace }}"
+ labels:
+ k8s-app: fluentd-logging
+ spec:
+ selector:
+ matchLabels:
+ name: fluentd-elasticsearch
+ template:
+ metadata:
+ labels:
+ name: fluentd-elasticsearch
+ spec:
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ containers:
+ - name: fluentd-elasticsearch
+ image: quay.io/fluentd_elasticsearch/fluentd:v2.5.2
+ resources:
+ limits:
+ memory: 200Mi
+ requests:
+ cpu: 100m
+ memory: 200Mi
+ volumeMounts:
+ - name: varlog
+ mountPath: /var/log
+ - name: varlibdockercontainers
+ mountPath: /var/lib/docker/containers
+ readOnly: true
+ terminationGracePeriodSeconds: 30
+ volumes:
+ - name: varlog
+ hostPath:
+ path: /var/log
+ - name: varlibdockercontainers
+ hostPath:
+ path: /var/lib/docker/containers
+ register: output
+
+ - name: Show output
+ debug:
+ var: output
+
+ - name: Crash the existing DaemonSet
+ k8s:
+ state: present
+ wait: yes
+ definition:
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ name: fluentd-elasticsearch
+ namespace: "{{ namespace }}"
+ labels:
+ k8s-app: fluentd-logging
+ spec:
+ selector:
+ matchLabels:
+ name: fluentd-elasticsearch
+ template:
+ metadata:
+ labels:
+ name: fluentd-elasticsearch
+ spec:
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ containers:
+ - name: fluentd-elasticsearch
+ image: quay.io/fluentd_elasticsearch/fluentd:v2734894949
+ resources:
+ limits:
+ memory: 200Mi
+ requests:
+ cpu: 100m
+ memory: 200Mi
+ volumeMounts:
+ - name: varlog
+ mountPath: /var/log
+ - name: varlibdockercontainers
+ mountPath: /var/lib/docker/containers
+ readOnly: true
+ terminationGracePeriodSeconds: 30
+ volumes:
+ - name: varlog
+ hostPath:
+ path: /var/log
+ - name: varlibdockercontainers
+ hostPath:
+ path: /var/lib/docker/containers
+ ignore_errors: yes
+ register: output
+
+ - name: Rolling Back the crashed DaemonSet
+ k8s_rollback:
+ api_version: apps/v1
+ kind: DaemonSet
+ name: fluentd-elasticsearch
+ namespace: "{{ namespace }}"
+ when: output.failed
+ register: output
+
+ - name: Show output
+ debug:
+ var: output
+
+ always:
+ - name: Delete {{ namespace }} namespace
+ k8s:
+ name: "{{ namespace }}"
+ kind: Namespace
+ api_version: v1
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/scale.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/scale.yml
new file mode 100644
index 00000000..32b718df
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/scale.yml
@@ -0,0 +1,210 @@
+---
+- block:
+ - set_fact:
+ scale_namespace: scale
+
+ - name: Ensure namespace exists
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: "{{ scale_namespace }}"
+
+ - name: Add a deployment
+ k8s:
+ definition:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: scale-deploy
+ namespace: "{{ scale_namespace }}"
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: "{{ k8s_pod_name }}"
+ template: "{{ k8s_pod_template }}"
+ wait: yes
+ wait_timeout: 60
+ apply: yes
+ vars:
+ k8s_pod_name: scale-deploy
+ k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green
+ k8s_pod_ports:
+ - containerPort: 8080
+ name: http
+ protocol: TCP
+
+ - name: Get pods in scale-deploy
+ k8s_info:
+ kind: Pod
+ label_selectors:
+ - app=scale-deploy
+ namespace: "{{ scale_namespace }}"
+ field_selectors:
+ - status.phase=Running
+
+ - name: Scale the deployment
+ k8s_scale:
+ api_version: apps/v1
+ kind: Deployment
+ name: scale-deploy
+ namespace: "{{ scale_namespace }}"
+ replicas: 0
+ wait: yes
+ register: scale_down
+
+ - name: Get pods in scale-deploy
+ k8s_info:
+ kind: Pod
+ label_selectors:
+ - app=scale-deploy
+ namespace: "{{ scale_namespace }}"
+ field_selectors:
+ - status.phase=Running
+ register: scale_down_deploy_pods
+ until: scale_down_deploy_pods.resources | length == 0
+ retries: 6
+ delay: 5
+
+ - name: Ensure that scale down took effect
+ assert:
+ that:
+ - scale_down is changed
+ - '"duration" in scale_down'
+ - scale_down.diff
+
+ - name: Reapply the earlier deployment
+ k8s:
+ definition:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: scale-deploy
+ namespace: "{{ scale_namespace }}"
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: "{{ k8s_pod_name }}"
+ template: "{{ k8s_pod_template }}"
+ wait: yes
+ wait_timeout: 60
+ apply: yes
+ vars:
+ k8s_pod_name: scale-deploy
+ k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green
+ k8s_pod_ports:
+ - containerPort: 8080
+ name: http
+ protocol: TCP
+ register: reapply_after_scale
+
+ - name: Get pods in scale-deploy
+ k8s_info:
+ kind: Pod
+ label_selectors:
+ - app=scale-deploy
+ namespace: "{{ scale_namespace }}"
+ field_selectors:
+ - status.phase=Running
+ register: scale_up_deploy_pods
+
+ - name: Ensure that reapply after scale worked
+ assert:
+ that:
+ - reapply_after_scale is changed
+ - scale_up_deploy_pods.resources | length == 1
+
+ - name: Scale the deployment up
+ k8s_scale:
+ api_version: apps/v1
+ kind: Deployment
+ name: scale-deploy
+ namespace: "{{ scale_namespace }}"
+ replicas: 2
+ wait: yes
+ wait_timeout: 60
+ register: scale_up
+
+ - name: Get pods in scale-deploy
+ k8s_info:
+ kind: Pod
+ label_selectors:
+ - app=scale-deploy
+ field_selectors:
+ - status.phase=Running
+ namespace: "{{ scale_namespace }}"
+ register: scale_up_further_deploy_pods
+
+ - name: Ensure that scale up worked
+ assert:
+ that:
+ - scale_up is changed
+ - '"duration" in scale_up'
+ - scale_up.diff
+ - scale_up_further_deploy_pods.resources | length == 2
+
+ - name: Don't scale the deployment up
+ k8s_scale:
+ api_version: apps/v1
+ kind: Deployment
+ name: scale-deploy
+ namespace: "{{ scale_namespace }}"
+ replicas: 2
+ wait: yes
+ register: scale_up_noop
+
+ - name: Get pods in scale-deploy
+ k8s_info:
+ kind: Pod
+ label_selectors:
+ - app=scale-deploy
+ field_selectors:
+ - status.phase=Running
+ namespace: "{{ scale_namespace }}"
+ register: scale_up_noop_pods
+
+ - name: Ensure that no-op scale up worked
+ assert:
+ that:
+ - scale_up_noop is not changed
+ - not scale_up_noop.diff
+ - scale_up_noop_pods.resources | length == 2
+ - '"duration" in scale_up_noop'
+
+ - name: Scale deployment down without wait
+ k8s_scale:
+ api_version: apps/v1
+ kind: Deployment
+ name: scale-deploy
+ namespace: "{{ scale_namespace }}"
+ replicas: 1
+ wait: no
+ register: scale_down_no_wait
+
+ - name: Ensure that scale down succeeds
+ k8s_info:
+ kind: Pod
+ label_selectors:
+ - app=scale-deploy
+ namespace: "{{ scale_namespace }}"
+ register: scale_down_no_wait_pods
+ retries: 6
+ delay: 5
+ until: scale_down_no_wait_pods.resources | length == 1
+
+ - name: Ensure that scale down without wait worked
+ assert:
+ that:
+ - scale_down_no_wait is changed
+ - scale_down_no_wait.diff
+ - scale_down_no_wait_pods.resources | length == 1
+
+ always:
+ - name: Remove namespace
+ k8s:
+ kind: Namespace
+ name: "{{ scale_namespace }}"
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/template.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/template.yml
new file mode 100644
index 00000000..4d76d799
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/template.yml
@@ -0,0 +1,167 @@
+---
+- block:
+ - set_fact:
+ template_namespace: template-test
+
+ - name: Ensure namespace exists
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: "{{ template_namespace }}"
+
+ - name: Check if k8s_service does not inherit parameter
+ community.kubernetes.k8s_service:
+ template: "pod_template_one.j2"
+ state: present
+ ignore_errors: yes
+ register: r
+
+ - name: Check for expected failures in last tasks
+ assert:
+ that:
+ - r.failed
+ - "'is only supported parameter for' in r.msg"
+
+ - name: Specify both definition and template
+ community.kubernetes.k8s:
+ state: present
+ template: "pod_template_one.j2"
+ definition:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: apply-deploy
+ namespace: "{{ template_namespace }}"
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: "{{ k8s_pod_name }}"
+ vars:
+ k8s_pod_name: pod
+ k8s_pod_namespace: "{{ template_namespace }}"
+ register: r
+ ignore_errors: yes
+
+ - name: Check if definition and template are mutually exclusive
+ assert:
+ that:
+ - r.failed
+ - "'parameters are mutually exclusive' in r.msg"
+
+ - name: Specify both src and template
+ community.kubernetes.k8s:
+ state: present
+ src: "../templates/pod_template_one.j2"
+ template: "pod_template_one.j2"
+ vars:
+ k8s_pod_name: pod
+ k8s_pod_namespace: "{{ template_namespace }}"
+ register: r
+ ignore_errors: yes
+
+ - name: Check if src and template are mutually exclusive
+ assert:
+ that:
+ - r.failed
+ - "'parameters are mutually exclusive' in r.msg"
+
+ - name: Create pod using template (direct specification)
+ community.kubernetes.k8s:
+ template: "pod_template_one.j2"
+ wait: yes
+ vars:
+ k8s_pod_name: pod-1
+ k8s_pod_namespace: "{{ template_namespace }}"
+ register: r
+
+ - name: Assert that pod creation succeeded using template
+ assert:
+ that:
+ - r is successful
+
+ - name: Create pod using template with wrong parameter
+ community.kubernetes.k8s:
+ template:
+ - default
+ wait: yes
+ vars:
+ k8s_pod_name: pod-2
+ k8s_pod_namespace: "{{ template_namespace }}"
+ register: r
+ ignore_errors: True
+
+ - name: Assert that pod creation failed using template due to wrong parameter
+ assert:
+ that:
+ - r is failed
+ - "'Error while reading template file' in r.msg"
+
+ - name: Create pod using template (path parameter)
+ community.kubernetes.k8s:
+ template:
+ path: "pod_template_one.j2"
+ wait: yes
+ vars:
+ k8s_pod_name: pod-3
+ k8s_pod_namespace: "{{ template_namespace }}"
+ register: r
+
+ - name: Assert that pod creation succeeded using template
+ assert:
+ that:
+ - r is successful
+
+ - name: Create pod using template (different variable string)
+ community.kubernetes.k8s:
+ template:
+ path: "pod_template_two.j2"
+ variable_start_string: '[['
+ variable_end_string: ']]'
+ wait: yes
+ vars:
+ k8s_pod_name: pod-4
+ k8s_pod_namespace: "[[ template_namespace ]]"
+ ansible_python_interpreter: "[[ ansible_playbook_python ]]"
+ register: r
+
+ - name: Assert that pod creation succeeded using template
+ assert:
+ that:
+ - r is successful
+
+ - name: Create pods using multi-resource template
+ community.kubernetes.k8s:
+ template:
+ path: "pod_template_three.j2"
+ wait: yes
+ vars:
+ k8s_pod_name_one: pod-5
+ k8s_pod_name_two: pod-6
+ k8s_pod_namespace: "{{ template_namespace }}"
+ register: r
+
+ - name: Assert that pod creation succeeded using template
+ assert:
+ that:
+ - r is successful
+
+ - name: Remove Pod (Cleanup)
+ k8s:
+ api_version: v1
+ kind: Pod
+ name: "pod-{{ item }}"
+ namespace: "{{ template_namespace }}"
+ state: absent
+ wait: yes
+ ignore_errors: yes
+ loop: "{{ range(1, 7) | list }}"
+
+ always:
+ - name: Remove namespace (Cleanup)
+ k8s:
+ kind: Namespace
+ name: "{{ template_namespace }}"
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/waiter.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/waiter.yml
new file mode 100644
index 00000000..4049f6ef
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/waiter.yml
@@ -0,0 +1,363 @@
+---
+- block:
+ - set_fact:
+ wait_namespace: wait
+
+ - name: Ensure namespace exists
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: "{{ wait_namespace }}"
+
+ - name: Add a simple pod
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ name: "{{ k8s_pod_name }}"
+ namespace: "{{ wait_namespace }}"
+ spec: "{{ k8s_pod_spec }}"
+ wait: yes
+ vars:
+ k8s_pod_name: wait-pod
+ k8s_pod_image: alpine:3.8
+ k8s_pod_command:
+ - sleep
+ - "10000"
+ register: wait_pod
+ ignore_errors: yes
+
+ - name: Assert that pod creation succeeded
+ assert:
+ that:
+ - wait_pod is successful
+
+ - name: Add a daemonset
+ k8s:
+ definition:
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ name: wait-daemonset
+ namespace: "{{ wait_namespace }}"
+ spec:
+ selector:
+ matchLabels:
+ app: "{{ k8s_pod_name }}"
+ template: "{{ k8s_pod_template }}"
+ wait: yes
+ wait_sleep: 5
+ wait_timeout: 180
+ vars:
+ k8s_pod_name: wait-ds
+ k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:1
+ register: ds
+
+ - name: Check that daemonset wait worked
+ assert:
+ that:
+ - ds.result.status.currentNumberScheduled == ds.result.status.desiredNumberScheduled
+
+ - name: Update a daemonset in check_mode
+ k8s:
+ definition:
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ name: wait-daemonset
+ namespace: "{{ wait_namespace }}"
+ spec:
+ selector:
+ matchLabels:
+ app: "{{ k8s_pod_name }}"
+ updateStrategy:
+ type: RollingUpdate
+ template: "{{ k8s_pod_template }}"
+ wait: yes
+ wait_sleep: 3
+ wait_timeout: 180
+ vars:
+ k8s_pod_name: wait-ds
+ k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:2
+ register: update_ds_check_mode
+ check_mode: yes
+
+ - name: Check that check_mode result contains the changes
+ assert:
+ that:
+ - update_ds_check_mode is changed
+ - "update_ds_check_mode.result.spec.template.spec.containers[0].image == 'gcr.io/kuar-demo/kuard-amd64:2'"
+
+ - name: Update a daemonset
+ k8s:
+ definition:
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ name: wait-daemonset
+ namespace: "{{ wait_namespace }}"
+ spec:
+ selector:
+ matchLabels:
+ app: "{{ k8s_pod_name }}"
+ updateStrategy:
+ type: RollingUpdate
+ template: "{{ k8s_pod_template }}"
+ wait: yes
+ wait_sleep: 3
+ wait_timeout: 180
+ vars:
+ k8s_pod_name: wait-ds
+ k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:3
+ register: ds
+
+ - name: Get updated pods
+ k8s_info:
+ api_version: v1
+ kind: Pod
+ namespace: "{{ wait_namespace }}"
+ label_selectors:
+ - app=wait-ds
+ field_selectors:
+ - status.phase=Running
+ register: updated_ds_pods
+
+ - name: Check that daemonset wait worked
+ assert:
+ that:
+ - ds.result.status.currentNumberScheduled == ds.result.status.desiredNumberScheduled
+ - updated_ds_pods.resources[0].spec.containers[0].image.endswith(":3")
+
+ - name: Add a crashing pod
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ name: "{{ k8s_pod_name }}"
+ namespace: "{{ wait_namespace }}"
+ spec: "{{ k8s_pod_spec }}"
+ wait: yes
+ wait_sleep: 1
+ wait_timeout: 30
+ vars:
+ k8s_pod_name: wait-crash-pod
+ k8s_pod_image: alpine:3.8
+ k8s_pod_command:
+ - /bin/false
+ register: crash_pod
+ ignore_errors: yes
+
+ - name: Check that task failed
+ assert:
+ that:
+ - crash_pod is failed
+
+ - name: Use a non-existent image
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ name: "{{ k8s_pod_name }}"
+ namespace: "{{ wait_namespace }}"
+ spec: "{{ k8s_pod_spec }}"
+ wait: yes
+ wait_sleep: 1
+ wait_timeout: 30
+ vars:
+ k8s_pod_name: wait-no-image-pod
+ k8s_pod_image: i_made_this_up:and_this_too
+ register: no_image_pod
+ ignore_errors: yes
+
+ - name: Check that task failed
+ assert:
+ that:
+ - no_image_pod is failed
+
+ - name: Add a deployment
+ k8s:
+ definition:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: wait-deploy
+ namespace: "{{ wait_namespace }}"
+ spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ app: "{{ k8s_pod_name }}"
+ template: "{{ k8s_pod_template }}"
+ wait: yes
+ vars:
+ k8s_pod_name: wait-deploy
+ k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:1
+ k8s_pod_ports:
+ - containerPort: 8080
+ name: http
+ protocol: TCP
+
+ register: deploy
+
+ - name: Check that deployment wait worked
+ assert:
+ that:
+ - deploy.result.status.availableReplicas == deploy.result.status.replicas
+
+ - name: Update a deployment
+ k8s:
+ definition:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: wait-deploy
+ namespace: "{{ wait_namespace }}"
+ spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ app: "{{ k8s_pod_name }}"
+ template: "{{ k8s_pod_template }}"
+ wait: yes
+ vars:
+ k8s_pod_name: wait-deploy
+ k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:2
+ k8s_pod_ports:
+ - containerPort: 8080
+ name: http
+ protocol: TCP
+ register: update_deploy
+
+ # It looks like the Deployment is updated to have the desired state *before* the pods are terminated
+ # Wait a couple of seconds to allow the old pods to at least get to Terminating state
+ - name: Avoid race condition
+ pause:
+ seconds: 2
+
+ - name: Get updated pods
+ k8s_info:
+ api_version: v1
+ kind: Pod
+ namespace: "{{ wait_namespace }}"
+ label_selectors:
+ - app=wait-deploy
+ field_selectors:
+ - status.phase=Running
+ register: updated_deploy_pods
+ until: updated_deploy_pods.resources[0].spec.containers[0].image.endswith(':2')
+ retries: 6
+ delay: 5
+
+ - name: Check that deployment wait worked
+ assert:
+ that:
+ - deploy.result.status.availableReplicas == deploy.result.status.replicas
+
+ - name: Pause a deployment
+ k8s:
+ definition:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: wait-deploy
+ namespace: "{{ wait_namespace }}"
+ spec:
+ paused: True
+ apply: no
+ wait: yes
+ wait_condition:
+ type: Progressing
+ status: Unknown
+ reason: DeploymentPaused
+ register: pause_deploy
+
+ - name: Check that paused deployment wait worked
+ assert:
+ that:
+ - condition.reason == "DeploymentPaused"
+ - condition.status == "Unknown"
+ vars:
+ condition: '{{ pause_deploy.result.status.conditions[1] }}'
+
+ - name: Add a service based on the deployment
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: wait-svc
+ namespace: "{{ wait_namespace }}"
+ spec:
+ selector:
+ app: "{{ k8s_pod_name }}"
+ ports:
+ - port: 8080
+ targetPort: 8080
+ protocol: TCP
+ wait: yes
+ vars:
+ k8s_pod_name: wait-deploy
+ register: service
+
+ - name: Assert that waiting for service works
+ assert:
+ that:
+ - service is successful
+
+ - name: Add a crashing deployment
+ k8s:
+ definition:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: wait-crash-deploy
+ namespace: "{{ wait_namespace }}"
+ spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ app: "{{ k8s_pod_name }}"
+ template: "{{ k8s_pod_template }}"
+ wait: yes
+ vars:
+ k8s_pod_name: wait-crash-deploy
+ k8s_pod_image: alpine:3.8
+ k8s_pod_command:
+ - /bin/false
+ register: wait_crash_deploy
+ ignore_errors: yes
+
+ - name: Check that task failed
+ assert:
+ that:
+ - wait_crash_deploy is failed
+
+ - name: Remove Pod with very short timeout
+ k8s:
+ api_version: v1
+ kind: Pod
+ name: wait-pod
+ namespace: "{{ wait_namespace }}"
+ state: absent
+ wait: yes
+ wait_sleep: 2
+ wait_timeout: 5
+ ignore_errors: yes
+ register: short_wait_remove_pod
+
+ - name: Check that task failed
+ assert:
+ that:
+ - short_wait_remove_pod is failed
+
+ always:
+ - name: Remove namespace
+ k8s:
+ kind: Namespace
+ name: "{{ wait_namespace }}"
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/templates/pod_template_one.j2 b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/templates/pod_template_one.j2
new file mode 100644
index 00000000..bafb7d9f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/templates/pod_template_one.j2
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ labels:
+ app: "{{ k8s_pod_name }}"
+ name: '{{ k8s_pod_name }}'
+ namespace: '{{ k8s_pod_namespace }}'
+spec:
+ containers:
+ - args:
+ - /bin/sh
+ - -c
+ - while true; do echo $(date); sleep 10; done
+ image: python:3.7-alpine
+ imagePullPolicy: Always
+ name: '{{ k8s_pod_name }}'
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/templates/pod_template_three.j2 b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/templates/pod_template_three.j2
new file mode 100644
index 00000000..06e4686e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/templates/pod_template_three.j2
@@ -0,0 +1,35 @@
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ labels:
+ app: "{{ k8s_pod_name_one }}"
+ name: '{{ k8s_pod_name_one }}'
+ namespace: '{{ k8s_pod_namespace }}'
+spec:
+ containers:
+ - args:
+ - /bin/sh
+ - -c
+ - while true; do echo $(date); sleep 10; done
+ image: python:3.7-alpine
+ imagePullPolicy: Always
+ name: '{{ k8s_pod_name_one }}'
+
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ labels:
+ app: "{{ k8s_pod_name_two }}"
+ name: '{{ k8s_pod_name_two }}'
+ namespace: '{{ k8s_pod_namespace }}'
+spec:
+ containers:
+ - args:
+ - /bin/sh
+ - -c
+ - while true; do echo $(date); sleep 10; done
+ image: python:3.7-alpine
+ imagePullPolicy: Always
+ name: '{{ k8s_pod_name_two }}'
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/templates/pod_template_two.j2 b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/templates/pod_template_two.j2
new file mode 100644
index 00000000..cef89bf1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/templates/pod_template_two.j2
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ labels:
+ app: '[[ k8s_pod_name ]]'
+ name: '[[ k8s_pod_name ]]'
+ namespace: '[[ k8s_pod_namespace ]]'
+spec:
+ containers:
+ - args:
+ - /bin/sh
+ - -c
+ - while true; do echo $(date); sleep 10; done
+ image: python:3.7-alpine
+ imagePullPolicy: Always
+ name: '[[ k8s_pod_name ]]'
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/vars/main.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/vars/main.yml
new file mode 100644
index 00000000..a478de97
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/vars/main.yml
@@ -0,0 +1,40 @@
+---
+k8s_pod_metadata:
+ labels:
+ app: "{{ k8s_pod_name }}"
+
+k8s_pod_spec:
+ serviceAccount: "{{ k8s_pod_service_account }}"
+ containers:
+ - image: "{{ k8s_pod_image }}"
+ imagePullPolicy: Always
+ name: "{{ k8s_pod_name }}"
+ command: "{{ k8s_pod_command }}"
+ readinessProbe:
+ initialDelaySeconds: 15
+ exec:
+ command:
+ - /bin/true
+ resources: "{{ k8s_pod_resources }}"
+ ports: "{{ k8s_pod_ports }}"
+ env: "{{ k8s_pod_env }}"
+
+
+k8s_pod_service_account: default
+
+k8s_pod_resources:
+ limits:
+ cpu: "100m"
+ memory: "100Mi"
+
+k8s_pod_command: []
+
+k8s_pod_ports: []
+
+k8s_pod_env: []
+
+k8s_pod_template:
+ metadata: "{{ k8s_pod_metadata }}"
+ spec: "{{ k8s_pod_spec }}"
+
+kubernetes_role_path: ../../tests/integration/targets/kubernetes