summaryrefslogtreecommitdiffstats
path: root/collections-debian-merged/ansible_collections/community/kubernetes/plugins
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-14 20:03:01 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-14 20:03:01 +0000
commita453ac31f3428614cceb99027f8efbdb9258a40b (patch)
treef61f87408f32a8511cbd91799f9cececb53e0374 /collections-debian-merged/ansible_collections/community/kubernetes/plugins
parentInitial commit. (diff)
downloadansible-upstream.tar.xz
ansible-upstream.zip
Adding upstream version 2.10.7+merged+base+2.10.8+dfsg.upstream/2.10.7+merged+base+2.10.8+dfsgupstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'collections-debian-merged/ansible_collections/community/kubernetes/plugins')
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/helm.py198
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/helm_info.py198
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/helm_plugin.py198
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/helm_plugin_info.py198
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/helm_repository.py198
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s.py198
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_auth.py198
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_exec.py198
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_info.py198
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_log.py198
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_scale.py198
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_service.py198
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/connection/kubectl.py369
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/helm_common_options.py34
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_auth_options.py97
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_name_options.py52
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_resource_options.py33
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_scale_options.py43
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_state_options.py30
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_wait_options.py67
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/filter/k8s.py33
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/inventory/k8s.py363
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/inventory/openshift.py202
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/lookup/k8s.py287
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/module_utils/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/module_utils/common.py818
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/module_utils/raw.py97
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/module_utils/scale.py166
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/helm.py573
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/helm_info.py205
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/helm_plugin.py242
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/helm_plugin_info.py167
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/helm_repository.py212
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s.py320
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_auth.py336
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_cluster_info.py247
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_exec.py205
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_info.py200
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_log.py236
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_rollback.py224
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_scale.py129
-rw-r--r--collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_service.py272
44 files changed, 8635 insertions, 0 deletions
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/helm.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/helm.py
new file mode 100644
index 00000000..6b26225f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/helm.py
@@ -0,0 +1,198 @@
+# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright (c) 2017, Toshio Kuratomi <tkuraotmi@ansible.com>
+# Copyright (c) 2020, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import copy
+import traceback
+
+from ansible.config.manager import ensure_type
+from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleAction, AnsibleActionFail
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_text, to_bytes, to_native
+from ansible.plugins.action import ActionBase
+
+
class ActionModule(ActionBase):
    """Controller-side pre-processing for the community.kubernetes modules.

    Resolves the ``kubeconfig`` and ``src`` file arguments against the
    playbook search path (vault-decrypting where needed) and, for the
    ``k8s`` module only, renders a Jinja2 ``template`` argument locally
    into a ``definition`` string, then executes the matching module.
    """

    TRANSFERS_FILES = True
    # Default Jinja2 newline sequence used when rendering 'template' args.
    DEFAULT_NEWLINE_SEQUENCE = "\n"

    def _ensure_invocation(self, result):
        """Attach the task args to *result* as ``invocation`` (censored under no_log)."""
        # NOTE: adding invocation arguments here needs to be kept in sync with
        # any no_log specified in the argument_spec in the module.
        if 'invocation' not in result:
            if self._play_context.no_log:
                result['invocation'] = "CENSORED: no_log is set"
            else:
                result['invocation'] = self._task.args.copy()
                result['invocation']['module_args'] = self._task.args.copy()

        return result

    def run(self, tmp=None, task_vars=None):
        ''' handler for k8s options '''
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        # Work on a copy so the original task args stay untouched for logging.
        new_module_args = copy.deepcopy(self._task.args)
        kubeconfig = self._task.args.get('kubeconfig', None)
        # find the file in the expected search path
        if kubeconfig:
            try:
                # find in expected paths
                kubeconfig = self._find_needle('files', kubeconfig)
            except AnsibleError as e:
                result['failed'] = True
                result['msg'] = to_text(e)
                result['exception'] = traceback.format_exc()
                return result

            if kubeconfig:
                # decrypt kubeconfig found
                actual_file = self._loader.get_real_file(kubeconfig, decrypt=True)
                new_module_args['kubeconfig'] = actual_file

        # find the file in the expected search path
        src = self._task.args.get('src', None)
        if src:
            try:
                # find in expected paths
                src = self._find_needle('files', src)
            except AnsibleError as e:
                result['failed'] = True
                result['msg'] = to_text(e)
                result['exception'] = traceback.format_exc()
                return result

            if src:
                new_module_args['src'] = src

        template = self._task.args.get('template', None)
        if template:
            # template is only supported by k8s module.
            if self._task.action not in ('k8s', 'community.kubernetes.k8s', 'community.okd.k8s'):
                raise AnsibleActionFail("'template' is only supported parameter for 'k8s' module.")
            if isinstance(template, string_types):
                # treat this as raw_params
                template_path = template
                newline_sequence = self.DEFAULT_NEWLINE_SEQUENCE
                variable_start_string = None
                variable_end_string = None
                block_start_string = None
                block_end_string = None
                trim_blocks = True
                lstrip_blocks = False
            elif isinstance(template, dict):
                template_args = template
                template_path = template_args.get('path', None)
                # BUGFIX: previously this tested `template`, which is always
                # truthy inside this branch, so a dict without a 'path' key was
                # never rejected here and failed later in _find_needle with a
                # confusing error. Test the extracted path instead.
                if not template_path:
                    raise AnsibleActionFail("Please specify path for template.")

                # Options type validation strings
                for s_type in ('newline_sequence', 'variable_start_string', 'variable_end_string', 'block_start_string',
                               'block_end_string'):
                    if s_type in template_args:
                        value = ensure_type(template_args[s_type], 'string')
                        if value is not None and not isinstance(value, string_types):
                            raise AnsibleActionFail("%s is expected to be a string, but got %s instead" % (s_type, type(value)))
                try:
                    trim_blocks = boolean(template_args.get('trim_blocks', True), strict=False)
                    lstrip_blocks = boolean(template_args.get('lstrip_blocks', False), strict=False)
                except TypeError as e:
                    raise AnsibleActionFail(to_native(e))

                newline_sequence = template_args.get('newline_sequence', self.DEFAULT_NEWLINE_SEQUENCE)
                variable_start_string = template_args.get('variable_start_string', None)
                variable_end_string = template_args.get('variable_end_string', None)
                block_start_string = template_args.get('block_start_string', None)
                block_end_string = template_args.get('block_end_string', None)
            else:
                raise AnsibleActionFail("Error while reading template file - "
                                        "a string or dict for template expected, but got %s instead" % type(template))
            try:
                source = self._find_needle('templates', template_path)
            except AnsibleError as e:
                raise AnsibleActionFail(to_text(e))

            # Option `lstrip_blocks' was added in Jinja2 version 2.7.
            if lstrip_blocks:
                try:
                    import jinja2.defaults
                except ImportError:
                    raise AnsibleError('Unable to import Jinja2 defaults for determining Jinja2 features.')

                try:
                    jinja2.defaults.LSTRIP_BLOCKS
                except AttributeError:
                    raise AnsibleError("Option `lstrip_blocks' is only available in Jinja2 versions >=2.7")

            wrong_sequences = ["\\n", "\\r", "\\r\\n"]
            allowed_sequences = ["\n", "\r", "\r\n"]

            # We need to convert unescaped sequences to proper escaped sequences for Jinja2
            if newline_sequence in wrong_sequences:
                newline_sequence = allowed_sequences[wrong_sequences.index(newline_sequence)]
            elif newline_sequence not in allowed_sequences:
                raise AnsibleActionFail("newline_sequence needs to be one of: \n, \r or \r\n")

            # Get vault decrypted tmp file
            try:
                tmp_source = self._loader.get_real_file(source)
            except AnsibleFileNotFound as e:
                raise AnsibleActionFail("could not find template=%s, %s" % (source, to_text(e)))
            b_tmp_source = to_bytes(tmp_source, errors='surrogate_or_strict')

            # template the source data locally & get ready to transfer
            try:
                with open(b_tmp_source, 'rb') as f:
                    try:
                        template_data = to_text(f.read(), errors='surrogate_or_strict')
                    except UnicodeError:
                        raise AnsibleActionFail("Template source files must be utf-8 encoded")

                # add ansible 'template' vars
                temp_vars = task_vars.copy()
                old_vars = self._templar.available_variables

                # Configure the Jinja2 environment from the template options,
                # only overriding delimiters the user actually supplied.
                self._templar.environment.newline_sequence = newline_sequence
                if block_start_string is not None:
                    self._templar.environment.block_start_string = block_start_string
                if block_end_string is not None:
                    self._templar.environment.block_end_string = block_end_string
                if variable_start_string is not None:
                    self._templar.environment.variable_start_string = variable_start_string
                if variable_end_string is not None:
                    self._templar.environment.variable_end_string = variable_end_string
                self._templar.environment.trim_blocks = trim_blocks
                self._templar.environment.lstrip_blocks = lstrip_blocks
                self._templar.available_variables = temp_vars
                resultant = self._templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False)
                # Restore the templar's variables so later templating is unaffected.
                self._templar.available_variables = old_vars
                resource_definition = self._task.args.get('definition', None)
                if not resource_definition:
                    # An explicit 'definition' wins over the rendered template.
                    new_module_args.pop('template')
                    new_module_args['definition'] = resultant
            except AnsibleAction:
                raise
            except Exception as e:
                raise AnsibleActionFail("%s: %s" % (type(e).__name__, to_text(e)))
            finally:
                # Always remove the vault-decrypted temp copy of the template.
                self._loader.cleanup_tmp_file(b_tmp_source)

        # Execute the k8s_* module.
        module_return = self._execute_module(module_name=self._task.action, module_args=new_module_args, task_vars=task_vars)

        # Delete tmp path
        self._remove_tmp_path(self._connection._shell.tmpdir)

        result.update(module_return)

        return self._ensure_invocation(result)
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/helm_info.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/helm_info.py
new file mode 100644
index 00000000..6b26225f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/helm_info.py
@@ -0,0 +1,198 @@
+# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright (c) 2017, Toshio Kuratomi <tkuraotmi@ansible.com>
+# Copyright (c) 2020, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import copy
+import traceback
+
+from ansible.config.manager import ensure_type
+from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleAction, AnsibleActionFail
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_text, to_bytes, to_native
+from ansible.plugins.action import ActionBase
+
+
class ActionModule(ActionBase):
    """Controller-side pre-processing for the community.kubernetes modules.

    Resolves the ``kubeconfig`` and ``src`` file arguments against the
    playbook search path (vault-decrypting where needed) and, for the
    ``k8s`` module only, renders a Jinja2 ``template`` argument locally
    into a ``definition`` string, then executes the matching module.
    """

    TRANSFERS_FILES = True
    # Default Jinja2 newline sequence used when rendering 'template' args.
    DEFAULT_NEWLINE_SEQUENCE = "\n"

    def _ensure_invocation(self, result):
        """Attach the task args to *result* as ``invocation`` (censored under no_log)."""
        # NOTE: adding invocation arguments here needs to be kept in sync with
        # any no_log specified in the argument_spec in the module.
        if 'invocation' not in result:
            if self._play_context.no_log:
                result['invocation'] = "CENSORED: no_log is set"
            else:
                result['invocation'] = self._task.args.copy()
                result['invocation']['module_args'] = self._task.args.copy()

        return result

    def run(self, tmp=None, task_vars=None):
        ''' handler for k8s options '''
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        # Work on a copy so the original task args stay untouched for logging.
        new_module_args = copy.deepcopy(self._task.args)
        kubeconfig = self._task.args.get('kubeconfig', None)
        # find the file in the expected search path
        if kubeconfig:
            try:
                # find in expected paths
                kubeconfig = self._find_needle('files', kubeconfig)
            except AnsibleError as e:
                result['failed'] = True
                result['msg'] = to_text(e)
                result['exception'] = traceback.format_exc()
                return result

            if kubeconfig:
                # decrypt kubeconfig found
                actual_file = self._loader.get_real_file(kubeconfig, decrypt=True)
                new_module_args['kubeconfig'] = actual_file

        # find the file in the expected search path
        src = self._task.args.get('src', None)
        if src:
            try:
                # find in expected paths
                src = self._find_needle('files', src)
            except AnsibleError as e:
                result['failed'] = True
                result['msg'] = to_text(e)
                result['exception'] = traceback.format_exc()
                return result

            if src:
                new_module_args['src'] = src

        template = self._task.args.get('template', None)
        if template:
            # template is only supported by k8s module.
            if self._task.action not in ('k8s', 'community.kubernetes.k8s', 'community.okd.k8s'):
                raise AnsibleActionFail("'template' is only supported parameter for 'k8s' module.")
            if isinstance(template, string_types):
                # treat this as raw_params
                template_path = template
                newline_sequence = self.DEFAULT_NEWLINE_SEQUENCE
                variable_start_string = None
                variable_end_string = None
                block_start_string = None
                block_end_string = None
                trim_blocks = True
                lstrip_blocks = False
            elif isinstance(template, dict):
                template_args = template
                template_path = template_args.get('path', None)
                # BUGFIX: previously this tested `template`, which is always
                # truthy inside this branch, so a dict without a 'path' key was
                # never rejected here and failed later in _find_needle with a
                # confusing error. Test the extracted path instead.
                if not template_path:
                    raise AnsibleActionFail("Please specify path for template.")

                # Options type validation strings
                for s_type in ('newline_sequence', 'variable_start_string', 'variable_end_string', 'block_start_string',
                               'block_end_string'):
                    if s_type in template_args:
                        value = ensure_type(template_args[s_type], 'string')
                        if value is not None and not isinstance(value, string_types):
                            raise AnsibleActionFail("%s is expected to be a string, but got %s instead" % (s_type, type(value)))
                try:
                    trim_blocks = boolean(template_args.get('trim_blocks', True), strict=False)
                    lstrip_blocks = boolean(template_args.get('lstrip_blocks', False), strict=False)
                except TypeError as e:
                    raise AnsibleActionFail(to_native(e))

                newline_sequence = template_args.get('newline_sequence', self.DEFAULT_NEWLINE_SEQUENCE)
                variable_start_string = template_args.get('variable_start_string', None)
                variable_end_string = template_args.get('variable_end_string', None)
                block_start_string = template_args.get('block_start_string', None)
                block_end_string = template_args.get('block_end_string', None)
            else:
                raise AnsibleActionFail("Error while reading template file - "
                                        "a string or dict for template expected, but got %s instead" % type(template))
            try:
                source = self._find_needle('templates', template_path)
            except AnsibleError as e:
                raise AnsibleActionFail(to_text(e))

            # Option `lstrip_blocks' was added in Jinja2 version 2.7.
            if lstrip_blocks:
                try:
                    import jinja2.defaults
                except ImportError:
                    raise AnsibleError('Unable to import Jinja2 defaults for determining Jinja2 features.')

                try:
                    jinja2.defaults.LSTRIP_BLOCKS
                except AttributeError:
                    raise AnsibleError("Option `lstrip_blocks' is only available in Jinja2 versions >=2.7")

            wrong_sequences = ["\\n", "\\r", "\\r\\n"]
            allowed_sequences = ["\n", "\r", "\r\n"]

            # We need to convert unescaped sequences to proper escaped sequences for Jinja2
            if newline_sequence in wrong_sequences:
                newline_sequence = allowed_sequences[wrong_sequences.index(newline_sequence)]
            elif newline_sequence not in allowed_sequences:
                raise AnsibleActionFail("newline_sequence needs to be one of: \n, \r or \r\n")

            # Get vault decrypted tmp file
            try:
                tmp_source = self._loader.get_real_file(source)
            except AnsibleFileNotFound as e:
                raise AnsibleActionFail("could not find template=%s, %s" % (source, to_text(e)))
            b_tmp_source = to_bytes(tmp_source, errors='surrogate_or_strict')

            # template the source data locally & get ready to transfer
            try:
                with open(b_tmp_source, 'rb') as f:
                    try:
                        template_data = to_text(f.read(), errors='surrogate_or_strict')
                    except UnicodeError:
                        raise AnsibleActionFail("Template source files must be utf-8 encoded")

                # add ansible 'template' vars
                temp_vars = task_vars.copy()
                old_vars = self._templar.available_variables

                # Configure the Jinja2 environment from the template options,
                # only overriding delimiters the user actually supplied.
                self._templar.environment.newline_sequence = newline_sequence
                if block_start_string is not None:
                    self._templar.environment.block_start_string = block_start_string
                if block_end_string is not None:
                    self._templar.environment.block_end_string = block_end_string
                if variable_start_string is not None:
                    self._templar.environment.variable_start_string = variable_start_string
                if variable_end_string is not None:
                    self._templar.environment.variable_end_string = variable_end_string
                self._templar.environment.trim_blocks = trim_blocks
                self._templar.environment.lstrip_blocks = lstrip_blocks
                self._templar.available_variables = temp_vars
                resultant = self._templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False)
                # Restore the templar's variables so later templating is unaffected.
                self._templar.available_variables = old_vars
                resource_definition = self._task.args.get('definition', None)
                if not resource_definition:
                    # An explicit 'definition' wins over the rendered template.
                    new_module_args.pop('template')
                    new_module_args['definition'] = resultant
            except AnsibleAction:
                raise
            except Exception as e:
                raise AnsibleActionFail("%s: %s" % (type(e).__name__, to_text(e)))
            finally:
                # Always remove the vault-decrypted temp copy of the template.
                self._loader.cleanup_tmp_file(b_tmp_source)

        # Execute the k8s_* module.
        module_return = self._execute_module(module_name=self._task.action, module_args=new_module_args, task_vars=task_vars)

        # Delete tmp path
        self._remove_tmp_path(self._connection._shell.tmpdir)

        result.update(module_return)

        return self._ensure_invocation(result)
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/helm_plugin.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/helm_plugin.py
new file mode 100644
index 00000000..6b26225f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/helm_plugin.py
@@ -0,0 +1,198 @@
+# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright (c) 2017, Toshio Kuratomi <tkuraotmi@ansible.com>
+# Copyright (c) 2020, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import copy
+import traceback
+
+from ansible.config.manager import ensure_type
+from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleAction, AnsibleActionFail
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_text, to_bytes, to_native
+from ansible.plugins.action import ActionBase
+
+
class ActionModule(ActionBase):
    """Controller-side pre-processing for the community.kubernetes modules.

    Resolves the ``kubeconfig`` and ``src`` file arguments against the
    playbook search path (vault-decrypting where needed) and, for the
    ``k8s`` module only, renders a Jinja2 ``template`` argument locally
    into a ``definition`` string, then executes the matching module.
    """

    TRANSFERS_FILES = True
    # Default Jinja2 newline sequence used when rendering 'template' args.
    DEFAULT_NEWLINE_SEQUENCE = "\n"

    def _ensure_invocation(self, result):
        """Attach the task args to *result* as ``invocation`` (censored under no_log)."""
        # NOTE: adding invocation arguments here needs to be kept in sync with
        # any no_log specified in the argument_spec in the module.
        if 'invocation' not in result:
            if self._play_context.no_log:
                result['invocation'] = "CENSORED: no_log is set"
            else:
                result['invocation'] = self._task.args.copy()
                result['invocation']['module_args'] = self._task.args.copy()

        return result

    def run(self, tmp=None, task_vars=None):
        ''' handler for k8s options '''
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        # Work on a copy so the original task args stay untouched for logging.
        new_module_args = copy.deepcopy(self._task.args)
        kubeconfig = self._task.args.get('kubeconfig', None)
        # find the file in the expected search path
        if kubeconfig:
            try:
                # find in expected paths
                kubeconfig = self._find_needle('files', kubeconfig)
            except AnsibleError as e:
                result['failed'] = True
                result['msg'] = to_text(e)
                result['exception'] = traceback.format_exc()
                return result

            if kubeconfig:
                # decrypt kubeconfig found
                actual_file = self._loader.get_real_file(kubeconfig, decrypt=True)
                new_module_args['kubeconfig'] = actual_file

        # find the file in the expected search path
        src = self._task.args.get('src', None)
        if src:
            try:
                # find in expected paths
                src = self._find_needle('files', src)
            except AnsibleError as e:
                result['failed'] = True
                result['msg'] = to_text(e)
                result['exception'] = traceback.format_exc()
                return result

            if src:
                new_module_args['src'] = src

        template = self._task.args.get('template', None)
        if template:
            # template is only supported by k8s module.
            if self._task.action not in ('k8s', 'community.kubernetes.k8s', 'community.okd.k8s'):
                raise AnsibleActionFail("'template' is only supported parameter for 'k8s' module.")
            if isinstance(template, string_types):
                # treat this as raw_params
                template_path = template
                newline_sequence = self.DEFAULT_NEWLINE_SEQUENCE
                variable_start_string = None
                variable_end_string = None
                block_start_string = None
                block_end_string = None
                trim_blocks = True
                lstrip_blocks = False
            elif isinstance(template, dict):
                template_args = template
                template_path = template_args.get('path', None)
                # BUGFIX: previously this tested `template`, which is always
                # truthy inside this branch, so a dict without a 'path' key was
                # never rejected here and failed later in _find_needle with a
                # confusing error. Test the extracted path instead.
                if not template_path:
                    raise AnsibleActionFail("Please specify path for template.")

                # Options type validation strings
                for s_type in ('newline_sequence', 'variable_start_string', 'variable_end_string', 'block_start_string',
                               'block_end_string'):
                    if s_type in template_args:
                        value = ensure_type(template_args[s_type], 'string')
                        if value is not None and not isinstance(value, string_types):
                            raise AnsibleActionFail("%s is expected to be a string, but got %s instead" % (s_type, type(value)))
                try:
                    trim_blocks = boolean(template_args.get('trim_blocks', True), strict=False)
                    lstrip_blocks = boolean(template_args.get('lstrip_blocks', False), strict=False)
                except TypeError as e:
                    raise AnsibleActionFail(to_native(e))

                newline_sequence = template_args.get('newline_sequence', self.DEFAULT_NEWLINE_SEQUENCE)
                variable_start_string = template_args.get('variable_start_string', None)
                variable_end_string = template_args.get('variable_end_string', None)
                block_start_string = template_args.get('block_start_string', None)
                block_end_string = template_args.get('block_end_string', None)
            else:
                raise AnsibleActionFail("Error while reading template file - "
                                        "a string or dict for template expected, but got %s instead" % type(template))
            try:
                source = self._find_needle('templates', template_path)
            except AnsibleError as e:
                raise AnsibleActionFail(to_text(e))

            # Option `lstrip_blocks' was added in Jinja2 version 2.7.
            if lstrip_blocks:
                try:
                    import jinja2.defaults
                except ImportError:
                    raise AnsibleError('Unable to import Jinja2 defaults for determining Jinja2 features.')

                try:
                    jinja2.defaults.LSTRIP_BLOCKS
                except AttributeError:
                    raise AnsibleError("Option `lstrip_blocks' is only available in Jinja2 versions >=2.7")

            wrong_sequences = ["\\n", "\\r", "\\r\\n"]
            allowed_sequences = ["\n", "\r", "\r\n"]

            # We need to convert unescaped sequences to proper escaped sequences for Jinja2
            if newline_sequence in wrong_sequences:
                newline_sequence = allowed_sequences[wrong_sequences.index(newline_sequence)]
            elif newline_sequence not in allowed_sequences:
                raise AnsibleActionFail("newline_sequence needs to be one of: \n, \r or \r\n")

            # Get vault decrypted tmp file
            try:
                tmp_source = self._loader.get_real_file(source)
            except AnsibleFileNotFound as e:
                raise AnsibleActionFail("could not find template=%s, %s" % (source, to_text(e)))
            b_tmp_source = to_bytes(tmp_source, errors='surrogate_or_strict')

            # template the source data locally & get ready to transfer
            try:
                with open(b_tmp_source, 'rb') as f:
                    try:
                        template_data = to_text(f.read(), errors='surrogate_or_strict')
                    except UnicodeError:
                        raise AnsibleActionFail("Template source files must be utf-8 encoded")

                # add ansible 'template' vars
                temp_vars = task_vars.copy()
                old_vars = self._templar.available_variables

                # Configure the Jinja2 environment from the template options,
                # only overriding delimiters the user actually supplied.
                self._templar.environment.newline_sequence = newline_sequence
                if block_start_string is not None:
                    self._templar.environment.block_start_string = block_start_string
                if block_end_string is not None:
                    self._templar.environment.block_end_string = block_end_string
                if variable_start_string is not None:
                    self._templar.environment.variable_start_string = variable_start_string
                if variable_end_string is not None:
                    self._templar.environment.variable_end_string = variable_end_string
                self._templar.environment.trim_blocks = trim_blocks
                self._templar.environment.lstrip_blocks = lstrip_blocks
                self._templar.available_variables = temp_vars
                resultant = self._templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False)
                # Restore the templar's variables so later templating is unaffected.
                self._templar.available_variables = old_vars
                resource_definition = self._task.args.get('definition', None)
                if not resource_definition:
                    # An explicit 'definition' wins over the rendered template.
                    new_module_args.pop('template')
                    new_module_args['definition'] = resultant
            except AnsibleAction:
                raise
            except Exception as e:
                raise AnsibleActionFail("%s: %s" % (type(e).__name__, to_text(e)))
            finally:
                # Always remove the vault-decrypted temp copy of the template.
                self._loader.cleanup_tmp_file(b_tmp_source)

        # Execute the k8s_* module.
        module_return = self._execute_module(module_name=self._task.action, module_args=new_module_args, task_vars=task_vars)

        # Delete tmp path
        self._remove_tmp_path(self._connection._shell.tmpdir)

        result.update(module_return)

        return self._ensure_invocation(result)
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/helm_plugin_info.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/helm_plugin_info.py
new file mode 100644
index 00000000..6b26225f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/helm_plugin_info.py
@@ -0,0 +1,198 @@
+# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright (c) 2017, Toshio Kuratomi <tkuraotmi@ansible.com>
+# Copyright (c) 2020, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import copy
+import traceback
+
+from ansible.config.manager import ensure_type
+from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleAction, AnsibleActionFail
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_text, to_bytes, to_native
+from ansible.plugins.action import ActionBase
+
+
class ActionModule(ActionBase):
    """Controller-side wrapper for the community.kubernetes modules.

    Before the real module executes, this action plugin resolves file-based
    arguments locally on the controller: it looks up and vault-decrypts
    ``kubeconfig``, resolves ``src`` against the files search path, and (for
    the ``k8s`` module only) renders a Jinja2 ``template`` into an inline
    ``definition`` argument.
    """

    TRANSFERS_FILES = True
    # Newline sequence handed to the Jinja2 environment unless the task
    # overrides it via the template argument.
    DEFAULT_NEWLINE_SEQUENCE = "\n"

    def _ensure_invocation(self, result):
        """Attach the task arguments to ``result['invocation']``.

        Honors ``no_log``: when set, the arguments are replaced with a
        censor marker instead of being echoed back to the user.
        """
        # NOTE: adding invocation arguments here needs to be kept in sync with
        # any no_log specified in the argument_spec in the module.
        if 'invocation' not in result:
            if self._play_context.no_log:
                result['invocation'] = "CENSORED: no_log is set"
            else:
                result['invocation'] = self._task.args.copy()
                result['invocation']['module_args'] = self._task.args.copy()

        return result

    def run(self, tmp=None, task_vars=None):
        ''' handler for k8s options '''
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        new_module_args = copy.deepcopy(self._task.args)
        kubeconfig = self._task.args.get('kubeconfig', None)
        # find the file in the expected search path
        if kubeconfig:
            try:
                # find in expected paths
                kubeconfig = self._find_needle('files', kubeconfig)
            except AnsibleError as e:
                result['failed'] = True
                result['msg'] = to_text(e)
                result['exception'] = traceback.format_exc()
                return result

        if kubeconfig:
            # decrypt kubeconfig found
            actual_file = self._loader.get_real_file(kubeconfig, decrypt=True)
            new_module_args['kubeconfig'] = actual_file

        # find the file in the expected search path
        src = self._task.args.get('src', None)
        if src:
            try:
                # find in expected paths
                src = self._find_needle('files', src)
            except AnsibleError as e:
                result['failed'] = True
                result['msg'] = to_text(e)
                result['exception'] = traceback.format_exc()
                return result

        if src:
            new_module_args['src'] = src

        template = self._task.args.get('template', None)
        if template:
            # template is only supported by k8s module.
            if self._task.action not in ('k8s', 'community.kubernetes.k8s', 'community.okd.k8s'):
                raise AnsibleActionFail("'template' is only supported parameter for 'k8s' module.")
            if isinstance(template, string_types):
                # treat this as raw_params
                template_path = template
                newline_sequence = self.DEFAULT_NEWLINE_SEQUENCE
                variable_start_string = None
                variable_end_string = None
                block_start_string = None
                block_end_string = None
                trim_blocks = True
                lstrip_blocks = False
            elif isinstance(template, dict):
                template_args = template
                template_path = template_args.get('path', None)
                # BUGFIX: validate the extracted path, not the template dict
                # itself (which is always truthy inside this branch), so a
                # template dict missing 'path' is reported clearly instead of
                # failing later in _find_needle with path=None.
                if not template_path:
                    raise AnsibleActionFail("Please specify path for template.")

                # Options type validation strings
                for s_type in ('newline_sequence', 'variable_start_string', 'variable_end_string', 'block_start_string',
                               'block_end_string'):
                    if s_type in template_args:
                        value = ensure_type(template_args[s_type], 'string')
                        if value is not None and not isinstance(value, string_types):
                            raise AnsibleActionFail("%s is expected to be a string, but got %s instead" % (s_type, type(value)))
                try:
                    trim_blocks = boolean(template_args.get('trim_blocks', True), strict=False)
                    lstrip_blocks = boolean(template_args.get('lstrip_blocks', False), strict=False)
                except TypeError as e:
                    raise AnsibleActionFail(to_native(e))

                newline_sequence = template_args.get('newline_sequence', self.DEFAULT_NEWLINE_SEQUENCE)
                variable_start_string = template_args.get('variable_start_string', None)
                variable_end_string = template_args.get('variable_end_string', None)
                block_start_string = template_args.get('block_start_string', None)
                block_end_string = template_args.get('block_end_string', None)
            else:
                raise AnsibleActionFail("Error while reading template file - "
                                        "a string or dict for template expected, but got %s instead" % type(template))
            try:
                source = self._find_needle('templates', template_path)
            except AnsibleError as e:
                raise AnsibleActionFail(to_text(e))

            # Option `lstrip_blocks' was added in Jinja2 version 2.7.
            if lstrip_blocks:
                try:
                    import jinja2.defaults
                except ImportError:
                    raise AnsibleError('Unable to import Jinja2 defaults for determining Jinja2 features.')

                try:
                    jinja2.defaults.LSTRIP_BLOCKS
                except AttributeError:
                    raise AnsibleError("Option `lstrip_blocks' is only available in Jinja2 versions >=2.7")

            wrong_sequences = ["\\n", "\\r", "\\r\\n"]
            allowed_sequences = ["\n", "\r", "\r\n"]

            # We need to convert unescaped sequences to proper escaped sequences for Jinja2
            if newline_sequence in wrong_sequences:
                newline_sequence = allowed_sequences[wrong_sequences.index(newline_sequence)]
            elif newline_sequence not in allowed_sequences:
                raise AnsibleActionFail("newline_sequence needs to be one of: \n, \r or \r\n")

            # Get vault decrypted tmp file
            try:
                tmp_source = self._loader.get_real_file(source)
            except AnsibleFileNotFound as e:
                raise AnsibleActionFail("could not find template=%s, %s" % (source, to_text(e)))
            b_tmp_source = to_bytes(tmp_source, errors='surrogate_or_strict')

            # template the source data locally & get ready to transfer
            try:
                with open(b_tmp_source, 'rb') as f:
                    try:
                        template_data = to_text(f.read(), errors='surrogate_or_strict')
                    except UnicodeError:
                        raise AnsibleActionFail("Template source files must be utf-8 encoded")

                # add ansible 'template' vars
                temp_vars = task_vars.copy()
                old_vars = self._templar.available_variables

                self._templar.environment.newline_sequence = newline_sequence
                if block_start_string is not None:
                    self._templar.environment.block_start_string = block_start_string
                if block_end_string is not None:
                    self._templar.environment.block_end_string = block_end_string
                if variable_start_string is not None:
                    self._templar.environment.variable_start_string = variable_start_string
                if variable_end_string is not None:
                    self._templar.environment.variable_end_string = variable_end_string
                self._templar.environment.trim_blocks = trim_blocks
                self._templar.environment.lstrip_blocks = lstrip_blocks
                self._templar.available_variables = temp_vars
                resultant = self._templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False)
                self._templar.available_variables = old_vars
                resource_definition = self._task.args.get('definition', None)
                if not resource_definition:
                    # replace the 'template' argument with the rendered text
                    new_module_args.pop('template')
                    new_module_args['definition'] = resultant
            except AnsibleAction:
                raise
            except Exception as e:
                raise AnsibleActionFail("%s: %s" % (type(e).__name__, to_text(e)))
            finally:
                self._loader.cleanup_tmp_file(b_tmp_source)

        # Execute the k8s_* module.
        module_return = self._execute_module(module_name=self._task.action, module_args=new_module_args, task_vars=task_vars)

        # Delete tmp path
        self._remove_tmp_path(self._connection._shell.tmpdir)

        result.update(module_return)

        return self._ensure_invocation(result)
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/helm_repository.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/helm_repository.py
new file mode 100644
index 00000000..6b26225f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/helm_repository.py
@@ -0,0 +1,198 @@
+# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright (c) 2017, Toshio Kuratomi <tkuraotmi@ansible.com>
+# Copyright (c) 2020, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import copy
+import traceback
+
+from ansible.config.manager import ensure_type
+from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleAction, AnsibleActionFail
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_text, to_bytes, to_native
+from ansible.plugins.action import ActionBase
+
+
class ActionModule(ActionBase):
    """Controller-side wrapper for the community.kubernetes modules.

    Before the real module executes, this action plugin resolves file-based
    arguments locally on the controller: it looks up and vault-decrypts
    ``kubeconfig``, resolves ``src`` against the files search path, and (for
    the ``k8s`` module only) renders a Jinja2 ``template`` into an inline
    ``definition`` argument.
    """

    TRANSFERS_FILES = True
    # Newline sequence handed to the Jinja2 environment unless the task
    # overrides it via the template argument.
    DEFAULT_NEWLINE_SEQUENCE = "\n"

    def _ensure_invocation(self, result):
        """Attach the task arguments to ``result['invocation']``.

        Honors ``no_log``: when set, the arguments are replaced with a
        censor marker instead of being echoed back to the user.
        """
        # NOTE: adding invocation arguments here needs to be kept in sync with
        # any no_log specified in the argument_spec in the module.
        if 'invocation' not in result:
            if self._play_context.no_log:
                result['invocation'] = "CENSORED: no_log is set"
            else:
                result['invocation'] = self._task.args.copy()
                result['invocation']['module_args'] = self._task.args.copy()

        return result

    def run(self, tmp=None, task_vars=None):
        ''' handler for k8s options '''
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        new_module_args = copy.deepcopy(self._task.args)
        kubeconfig = self._task.args.get('kubeconfig', None)
        # find the file in the expected search path
        if kubeconfig:
            try:
                # find in expected paths
                kubeconfig = self._find_needle('files', kubeconfig)
            except AnsibleError as e:
                result['failed'] = True
                result['msg'] = to_text(e)
                result['exception'] = traceback.format_exc()
                return result

        if kubeconfig:
            # decrypt kubeconfig found
            actual_file = self._loader.get_real_file(kubeconfig, decrypt=True)
            new_module_args['kubeconfig'] = actual_file

        # find the file in the expected search path
        src = self._task.args.get('src', None)
        if src:
            try:
                # find in expected paths
                src = self._find_needle('files', src)
            except AnsibleError as e:
                result['failed'] = True
                result['msg'] = to_text(e)
                result['exception'] = traceback.format_exc()
                return result

        if src:
            new_module_args['src'] = src

        template = self._task.args.get('template', None)
        if template:
            # template is only supported by k8s module.
            if self._task.action not in ('k8s', 'community.kubernetes.k8s', 'community.okd.k8s'):
                raise AnsibleActionFail("'template' is only supported parameter for 'k8s' module.")
            if isinstance(template, string_types):
                # treat this as raw_params
                template_path = template
                newline_sequence = self.DEFAULT_NEWLINE_SEQUENCE
                variable_start_string = None
                variable_end_string = None
                block_start_string = None
                block_end_string = None
                trim_blocks = True
                lstrip_blocks = False
            elif isinstance(template, dict):
                template_args = template
                template_path = template_args.get('path', None)
                # BUGFIX: validate the extracted path, not the template dict
                # itself (which is always truthy inside this branch), so a
                # template dict missing 'path' is reported clearly instead of
                # failing later in _find_needle with path=None.
                if not template_path:
                    raise AnsibleActionFail("Please specify path for template.")

                # Options type validation strings
                for s_type in ('newline_sequence', 'variable_start_string', 'variable_end_string', 'block_start_string',
                               'block_end_string'):
                    if s_type in template_args:
                        value = ensure_type(template_args[s_type], 'string')
                        if value is not None and not isinstance(value, string_types):
                            raise AnsibleActionFail("%s is expected to be a string, but got %s instead" % (s_type, type(value)))
                try:
                    trim_blocks = boolean(template_args.get('trim_blocks', True), strict=False)
                    lstrip_blocks = boolean(template_args.get('lstrip_blocks', False), strict=False)
                except TypeError as e:
                    raise AnsibleActionFail(to_native(e))

                newline_sequence = template_args.get('newline_sequence', self.DEFAULT_NEWLINE_SEQUENCE)
                variable_start_string = template_args.get('variable_start_string', None)
                variable_end_string = template_args.get('variable_end_string', None)
                block_start_string = template_args.get('block_start_string', None)
                block_end_string = template_args.get('block_end_string', None)
            else:
                raise AnsibleActionFail("Error while reading template file - "
                                        "a string or dict for template expected, but got %s instead" % type(template))
            try:
                source = self._find_needle('templates', template_path)
            except AnsibleError as e:
                raise AnsibleActionFail(to_text(e))

            # Option `lstrip_blocks' was added in Jinja2 version 2.7.
            if lstrip_blocks:
                try:
                    import jinja2.defaults
                except ImportError:
                    raise AnsibleError('Unable to import Jinja2 defaults for determining Jinja2 features.')

                try:
                    jinja2.defaults.LSTRIP_BLOCKS
                except AttributeError:
                    raise AnsibleError("Option `lstrip_blocks' is only available in Jinja2 versions >=2.7")

            wrong_sequences = ["\\n", "\\r", "\\r\\n"]
            allowed_sequences = ["\n", "\r", "\r\n"]

            # We need to convert unescaped sequences to proper escaped sequences for Jinja2
            if newline_sequence in wrong_sequences:
                newline_sequence = allowed_sequences[wrong_sequences.index(newline_sequence)]
            elif newline_sequence not in allowed_sequences:
                raise AnsibleActionFail("newline_sequence needs to be one of: \n, \r or \r\n")

            # Get vault decrypted tmp file
            try:
                tmp_source = self._loader.get_real_file(source)
            except AnsibleFileNotFound as e:
                raise AnsibleActionFail("could not find template=%s, %s" % (source, to_text(e)))
            b_tmp_source = to_bytes(tmp_source, errors='surrogate_or_strict')

            # template the source data locally & get ready to transfer
            try:
                with open(b_tmp_source, 'rb') as f:
                    try:
                        template_data = to_text(f.read(), errors='surrogate_or_strict')
                    except UnicodeError:
                        raise AnsibleActionFail("Template source files must be utf-8 encoded")

                # add ansible 'template' vars
                temp_vars = task_vars.copy()
                old_vars = self._templar.available_variables

                self._templar.environment.newline_sequence = newline_sequence
                if block_start_string is not None:
                    self._templar.environment.block_start_string = block_start_string
                if block_end_string is not None:
                    self._templar.environment.block_end_string = block_end_string
                if variable_start_string is not None:
                    self._templar.environment.variable_start_string = variable_start_string
                if variable_end_string is not None:
                    self._templar.environment.variable_end_string = variable_end_string
                self._templar.environment.trim_blocks = trim_blocks
                self._templar.environment.lstrip_blocks = lstrip_blocks
                self._templar.available_variables = temp_vars
                resultant = self._templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False)
                self._templar.available_variables = old_vars
                resource_definition = self._task.args.get('definition', None)
                if not resource_definition:
                    # replace the 'template' argument with the rendered text
                    new_module_args.pop('template')
                    new_module_args['definition'] = resultant
            except AnsibleAction:
                raise
            except Exception as e:
                raise AnsibleActionFail("%s: %s" % (type(e).__name__, to_text(e)))
            finally:
                self._loader.cleanup_tmp_file(b_tmp_source)

        # Execute the k8s_* module.
        module_return = self._execute_module(module_name=self._task.action, module_args=new_module_args, task_vars=task_vars)

        # Delete tmp path
        self._remove_tmp_path(self._connection._shell.tmpdir)

        result.update(module_return)

        return self._ensure_invocation(result)
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s.py
new file mode 100644
index 00000000..6b26225f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s.py
@@ -0,0 +1,198 @@
+# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright (c) 2017, Toshio Kuratomi <tkuraotmi@ansible.com>
+# Copyright (c) 2020, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import copy
+import traceback
+
+from ansible.config.manager import ensure_type
+from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleAction, AnsibleActionFail
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_text, to_bytes, to_native
+from ansible.plugins.action import ActionBase
+
+
class ActionModule(ActionBase):
    """Controller-side wrapper for the community.kubernetes modules.

    Before the real module executes, this action plugin resolves file-based
    arguments locally on the controller: it looks up and vault-decrypts
    ``kubeconfig``, resolves ``src`` against the files search path, and (for
    the ``k8s`` module only) renders a Jinja2 ``template`` into an inline
    ``definition`` argument.
    """

    TRANSFERS_FILES = True
    # Newline sequence handed to the Jinja2 environment unless the task
    # overrides it via the template argument.
    DEFAULT_NEWLINE_SEQUENCE = "\n"

    def _ensure_invocation(self, result):
        """Attach the task arguments to ``result['invocation']``.

        Honors ``no_log``: when set, the arguments are replaced with a
        censor marker instead of being echoed back to the user.
        """
        # NOTE: adding invocation arguments here needs to be kept in sync with
        # any no_log specified in the argument_spec in the module.
        if 'invocation' not in result:
            if self._play_context.no_log:
                result['invocation'] = "CENSORED: no_log is set"
            else:
                result['invocation'] = self._task.args.copy()
                result['invocation']['module_args'] = self._task.args.copy()

        return result

    def run(self, tmp=None, task_vars=None):
        ''' handler for k8s options '''
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        new_module_args = copy.deepcopy(self._task.args)
        kubeconfig = self._task.args.get('kubeconfig', None)
        # find the file in the expected search path
        if kubeconfig:
            try:
                # find in expected paths
                kubeconfig = self._find_needle('files', kubeconfig)
            except AnsibleError as e:
                result['failed'] = True
                result['msg'] = to_text(e)
                result['exception'] = traceback.format_exc()
                return result

        if kubeconfig:
            # decrypt kubeconfig found
            actual_file = self._loader.get_real_file(kubeconfig, decrypt=True)
            new_module_args['kubeconfig'] = actual_file

        # find the file in the expected search path
        src = self._task.args.get('src', None)
        if src:
            try:
                # find in expected paths
                src = self._find_needle('files', src)
            except AnsibleError as e:
                result['failed'] = True
                result['msg'] = to_text(e)
                result['exception'] = traceback.format_exc()
                return result

        if src:
            new_module_args['src'] = src

        template = self._task.args.get('template', None)
        if template:
            # template is only supported by k8s module.
            if self._task.action not in ('k8s', 'community.kubernetes.k8s', 'community.okd.k8s'):
                raise AnsibleActionFail("'template' is only supported parameter for 'k8s' module.")
            if isinstance(template, string_types):
                # treat this as raw_params
                template_path = template
                newline_sequence = self.DEFAULT_NEWLINE_SEQUENCE
                variable_start_string = None
                variable_end_string = None
                block_start_string = None
                block_end_string = None
                trim_blocks = True
                lstrip_blocks = False
            elif isinstance(template, dict):
                template_args = template
                template_path = template_args.get('path', None)
                # BUGFIX: validate the extracted path, not the template dict
                # itself (which is always truthy inside this branch), so a
                # template dict missing 'path' is reported clearly instead of
                # failing later in _find_needle with path=None.
                if not template_path:
                    raise AnsibleActionFail("Please specify path for template.")

                # Options type validation strings
                for s_type in ('newline_sequence', 'variable_start_string', 'variable_end_string', 'block_start_string',
                               'block_end_string'):
                    if s_type in template_args:
                        value = ensure_type(template_args[s_type], 'string')
                        if value is not None and not isinstance(value, string_types):
                            raise AnsibleActionFail("%s is expected to be a string, but got %s instead" % (s_type, type(value)))
                try:
                    trim_blocks = boolean(template_args.get('trim_blocks', True), strict=False)
                    lstrip_blocks = boolean(template_args.get('lstrip_blocks', False), strict=False)
                except TypeError as e:
                    raise AnsibleActionFail(to_native(e))

                newline_sequence = template_args.get('newline_sequence', self.DEFAULT_NEWLINE_SEQUENCE)
                variable_start_string = template_args.get('variable_start_string', None)
                variable_end_string = template_args.get('variable_end_string', None)
                block_start_string = template_args.get('block_start_string', None)
                block_end_string = template_args.get('block_end_string', None)
            else:
                raise AnsibleActionFail("Error while reading template file - "
                                        "a string or dict for template expected, but got %s instead" % type(template))
            try:
                source = self._find_needle('templates', template_path)
            except AnsibleError as e:
                raise AnsibleActionFail(to_text(e))

            # Option `lstrip_blocks' was added in Jinja2 version 2.7.
            if lstrip_blocks:
                try:
                    import jinja2.defaults
                except ImportError:
                    raise AnsibleError('Unable to import Jinja2 defaults for determining Jinja2 features.')

                try:
                    jinja2.defaults.LSTRIP_BLOCKS
                except AttributeError:
                    raise AnsibleError("Option `lstrip_blocks' is only available in Jinja2 versions >=2.7")

            wrong_sequences = ["\\n", "\\r", "\\r\\n"]
            allowed_sequences = ["\n", "\r", "\r\n"]

            # We need to convert unescaped sequences to proper escaped sequences for Jinja2
            if newline_sequence in wrong_sequences:
                newline_sequence = allowed_sequences[wrong_sequences.index(newline_sequence)]
            elif newline_sequence not in allowed_sequences:
                raise AnsibleActionFail("newline_sequence needs to be one of: \n, \r or \r\n")

            # Get vault decrypted tmp file
            try:
                tmp_source = self._loader.get_real_file(source)
            except AnsibleFileNotFound as e:
                raise AnsibleActionFail("could not find template=%s, %s" % (source, to_text(e)))
            b_tmp_source = to_bytes(tmp_source, errors='surrogate_or_strict')

            # template the source data locally & get ready to transfer
            try:
                with open(b_tmp_source, 'rb') as f:
                    try:
                        template_data = to_text(f.read(), errors='surrogate_or_strict')
                    except UnicodeError:
                        raise AnsibleActionFail("Template source files must be utf-8 encoded")

                # add ansible 'template' vars
                temp_vars = task_vars.copy()
                old_vars = self._templar.available_variables

                self._templar.environment.newline_sequence = newline_sequence
                if block_start_string is not None:
                    self._templar.environment.block_start_string = block_start_string
                if block_end_string is not None:
                    self._templar.environment.block_end_string = block_end_string
                if variable_start_string is not None:
                    self._templar.environment.variable_start_string = variable_start_string
                if variable_end_string is not None:
                    self._templar.environment.variable_end_string = variable_end_string
                self._templar.environment.trim_blocks = trim_blocks
                self._templar.environment.lstrip_blocks = lstrip_blocks
                self._templar.available_variables = temp_vars
                resultant = self._templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False)
                self._templar.available_variables = old_vars
                resource_definition = self._task.args.get('definition', None)
                if not resource_definition:
                    # replace the 'template' argument with the rendered text
                    new_module_args.pop('template')
                    new_module_args['definition'] = resultant
            except AnsibleAction:
                raise
            except Exception as e:
                raise AnsibleActionFail("%s: %s" % (type(e).__name__, to_text(e)))
            finally:
                self._loader.cleanup_tmp_file(b_tmp_source)

        # Execute the k8s_* module.
        module_return = self._execute_module(module_name=self._task.action, module_args=new_module_args, task_vars=task_vars)

        # Delete tmp path
        self._remove_tmp_path(self._connection._shell.tmpdir)

        result.update(module_return)

        return self._ensure_invocation(result)
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_auth.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_auth.py
new file mode 100644
index 00000000..6b26225f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_auth.py
@@ -0,0 +1,198 @@
+# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright (c) 2017, Toshio Kuratomi <tkuraotmi@ansible.com>
+# Copyright (c) 2020, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import copy
+import traceback
+
+from ansible.config.manager import ensure_type
+from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleAction, AnsibleActionFail
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_text, to_bytes, to_native
+from ansible.plugins.action import ActionBase
+
+
class ActionModule(ActionBase):
    """Controller-side wrapper for the community.kubernetes modules.

    Before the real module executes, this action plugin resolves file-based
    arguments locally on the controller: it looks up and vault-decrypts
    ``kubeconfig``, resolves ``src`` against the files search path, and (for
    the ``k8s`` module only) renders a Jinja2 ``template`` into an inline
    ``definition`` argument.
    """

    TRANSFERS_FILES = True
    # Newline sequence handed to the Jinja2 environment unless the task
    # overrides it via the template argument.
    DEFAULT_NEWLINE_SEQUENCE = "\n"

    def _ensure_invocation(self, result):
        """Attach the task arguments to ``result['invocation']``.

        Honors ``no_log``: when set, the arguments are replaced with a
        censor marker instead of being echoed back to the user.
        """
        # NOTE: adding invocation arguments here needs to be kept in sync with
        # any no_log specified in the argument_spec in the module.
        if 'invocation' not in result:
            if self._play_context.no_log:
                result['invocation'] = "CENSORED: no_log is set"
            else:
                result['invocation'] = self._task.args.copy()
                result['invocation']['module_args'] = self._task.args.copy()

        return result

    def run(self, tmp=None, task_vars=None):
        ''' handler for k8s options '''
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        new_module_args = copy.deepcopy(self._task.args)
        kubeconfig = self._task.args.get('kubeconfig', None)
        # find the file in the expected search path
        if kubeconfig:
            try:
                # find in expected paths
                kubeconfig = self._find_needle('files', kubeconfig)
            except AnsibleError as e:
                result['failed'] = True
                result['msg'] = to_text(e)
                result['exception'] = traceback.format_exc()
                return result

        if kubeconfig:
            # decrypt kubeconfig found
            actual_file = self._loader.get_real_file(kubeconfig, decrypt=True)
            new_module_args['kubeconfig'] = actual_file

        # find the file in the expected search path
        src = self._task.args.get('src', None)
        if src:
            try:
                # find in expected paths
                src = self._find_needle('files', src)
            except AnsibleError as e:
                result['failed'] = True
                result['msg'] = to_text(e)
                result['exception'] = traceback.format_exc()
                return result

        if src:
            new_module_args['src'] = src

        template = self._task.args.get('template', None)
        if template:
            # template is only supported by k8s module.
            if self._task.action not in ('k8s', 'community.kubernetes.k8s', 'community.okd.k8s'):
                raise AnsibleActionFail("'template' is only supported parameter for 'k8s' module.")
            if isinstance(template, string_types):
                # treat this as raw_params
                template_path = template
                newline_sequence = self.DEFAULT_NEWLINE_SEQUENCE
                variable_start_string = None
                variable_end_string = None
                block_start_string = None
                block_end_string = None
                trim_blocks = True
                lstrip_blocks = False
            elif isinstance(template, dict):
                template_args = template
                template_path = template_args.get('path', None)
                # BUGFIX: validate the extracted path, not the template dict
                # itself (which is always truthy inside this branch), so a
                # template dict missing 'path' is reported clearly instead of
                # failing later in _find_needle with path=None.
                if not template_path:
                    raise AnsibleActionFail("Please specify path for template.")

                # Options type validation strings
                for s_type in ('newline_sequence', 'variable_start_string', 'variable_end_string', 'block_start_string',
                               'block_end_string'):
                    if s_type in template_args:
                        value = ensure_type(template_args[s_type], 'string')
                        if value is not None and not isinstance(value, string_types):
                            raise AnsibleActionFail("%s is expected to be a string, but got %s instead" % (s_type, type(value)))
                try:
                    trim_blocks = boolean(template_args.get('trim_blocks', True), strict=False)
                    lstrip_blocks = boolean(template_args.get('lstrip_blocks', False), strict=False)
                except TypeError as e:
                    raise AnsibleActionFail(to_native(e))

                newline_sequence = template_args.get('newline_sequence', self.DEFAULT_NEWLINE_SEQUENCE)
                variable_start_string = template_args.get('variable_start_string', None)
                variable_end_string = template_args.get('variable_end_string', None)
                block_start_string = template_args.get('block_start_string', None)
                block_end_string = template_args.get('block_end_string', None)
            else:
                raise AnsibleActionFail("Error while reading template file - "
                                        "a string or dict for template expected, but got %s instead" % type(template))
            try:
                source = self._find_needle('templates', template_path)
            except AnsibleError as e:
                raise AnsibleActionFail(to_text(e))

            # Option `lstrip_blocks' was added in Jinja2 version 2.7.
            if lstrip_blocks:
                try:
                    import jinja2.defaults
                except ImportError:
                    raise AnsibleError('Unable to import Jinja2 defaults for determining Jinja2 features.')

                try:
                    jinja2.defaults.LSTRIP_BLOCKS
                except AttributeError:
                    raise AnsibleError("Option `lstrip_blocks' is only available in Jinja2 versions >=2.7")

            wrong_sequences = ["\\n", "\\r", "\\r\\n"]
            allowed_sequences = ["\n", "\r", "\r\n"]

            # We need to convert unescaped sequences to proper escaped sequences for Jinja2
            if newline_sequence in wrong_sequences:
                newline_sequence = allowed_sequences[wrong_sequences.index(newline_sequence)]
            elif newline_sequence not in allowed_sequences:
                raise AnsibleActionFail("newline_sequence needs to be one of: \n, \r or \r\n")

            # Get vault decrypted tmp file
            try:
                tmp_source = self._loader.get_real_file(source)
            except AnsibleFileNotFound as e:
                raise AnsibleActionFail("could not find template=%s, %s" % (source, to_text(e)))
            b_tmp_source = to_bytes(tmp_source, errors='surrogate_or_strict')

            # template the source data locally & get ready to transfer
            try:
                with open(b_tmp_source, 'rb') as f:
                    try:
                        template_data = to_text(f.read(), errors='surrogate_or_strict')
                    except UnicodeError:
                        raise AnsibleActionFail("Template source files must be utf-8 encoded")

                # add ansible 'template' vars
                temp_vars = task_vars.copy()
                old_vars = self._templar.available_variables

                self._templar.environment.newline_sequence = newline_sequence
                if block_start_string is not None:
                    self._templar.environment.block_start_string = block_start_string
                if block_end_string is not None:
                    self._templar.environment.block_end_string = block_end_string
                if variable_start_string is not None:
                    self._templar.environment.variable_start_string = variable_start_string
                if variable_end_string is not None:
                    self._templar.environment.variable_end_string = variable_end_string
                self._templar.environment.trim_blocks = trim_blocks
                self._templar.environment.lstrip_blocks = lstrip_blocks
                self._templar.available_variables = temp_vars
                resultant = self._templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False)
                self._templar.available_variables = old_vars
                resource_definition = self._task.args.get('definition', None)
                if not resource_definition:
                    # replace the 'template' argument with the rendered text
                    new_module_args.pop('template')
                    new_module_args['definition'] = resultant
            except AnsibleAction:
                raise
            except Exception as e:
                raise AnsibleActionFail("%s: %s" % (type(e).__name__, to_text(e)))
            finally:
                self._loader.cleanup_tmp_file(b_tmp_source)

        # Execute the k8s_* module.
        module_return = self._execute_module(module_name=self._task.action, module_args=new_module_args, task_vars=task_vars)

        # Delete tmp path
        self._remove_tmp_path(self._connection._shell.tmpdir)

        result.update(module_return)

        return self._ensure_invocation(result)
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_exec.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_exec.py
new file mode 100644
index 00000000..6b26225f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_exec.py
@@ -0,0 +1,198 @@
+# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright (c) 2017, Toshio Kuratomi <tkuraotmi@ansible.com>
+# Copyright (c) 2020, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import copy
+import traceback
+
+from ansible.config.manager import ensure_type
+from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleAction, AnsibleActionFail
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_text, to_bytes, to_native
+from ansible.plugins.action import ActionBase
+
+
+class ActionModule(ActionBase):
+
+ TRANSFERS_FILES = True
+ DEFAULT_NEWLINE_SEQUENCE = "\n"
+
+ def _ensure_invocation(self, result):
+ # NOTE: adding invocation arguments here needs to be kept in sync with
+ # any no_log specified in the argument_spec in the module.
+ if 'invocation' not in result:
+ if self._play_context.no_log:
+ result['invocation'] = "CENSORED: no_log is set"
+ else:
+ result['invocation'] = self._task.args.copy()
+                result['invocation']['module_args'] = self._task.args.copy()
+
+ return result
+
+ def run(self, tmp=None, task_vars=None):
+ ''' handler for k8s options '''
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ new_module_args = copy.deepcopy(self._task.args)
+ kubeconfig = self._task.args.get('kubeconfig', None)
+ # find the file in the expected search path
+ if kubeconfig:
+ try:
+ # find in expected paths
+ kubeconfig = self._find_needle('files', kubeconfig)
+ except AnsibleError as e:
+ result['failed'] = True
+ result['msg'] = to_text(e)
+ result['exception'] = traceback.format_exc()
+ return result
+
+ if kubeconfig:
+ # decrypt kubeconfig found
+ actual_file = self._loader.get_real_file(kubeconfig, decrypt=True)
+ new_module_args['kubeconfig'] = actual_file
+
+ # find the file in the expected search path
+ src = self._task.args.get('src', None)
+ if src:
+ try:
+ # find in expected paths
+ src = self._find_needle('files', src)
+ except AnsibleError as e:
+ result['failed'] = True
+ result['msg'] = to_text(e)
+ result['exception'] = traceback.format_exc()
+ return result
+
+ if src:
+ new_module_args['src'] = src
+
+ template = self._task.args.get('template', None)
+ if template:
+ # template is only supported by k8s module.
+ if self._task.action not in ('k8s', 'community.kubernetes.k8s', 'community.okd.k8s'):
+ raise AnsibleActionFail("'template' is only supported parameter for 'k8s' module.")
+ if isinstance(template, string_types):
+ # treat this as raw_params
+ template_path = template
+ newline_sequence = self.DEFAULT_NEWLINE_SEQUENCE
+ variable_start_string = None
+ variable_end_string = None
+ block_start_string = None
+ block_end_string = None
+ trim_blocks = True
+ lstrip_blocks = False
+ elif isinstance(template, dict):
+ template_args = template
+ template_path = template_args.get('path', None)
+            if not template_path:
+ raise AnsibleActionFail("Please specify path for template.")
+
+ # Options type validation strings
+ for s_type in ('newline_sequence', 'variable_start_string', 'variable_end_string', 'block_start_string',
+ 'block_end_string'):
+ if s_type in template_args:
+ value = ensure_type(template_args[s_type], 'string')
+ if value is not None and not isinstance(value, string_types):
+ raise AnsibleActionFail("%s is expected to be a string, but got %s instead" % (s_type, type(value)))
+ try:
+ trim_blocks = boolean(template_args.get('trim_blocks', True), strict=False)
+ lstrip_blocks = boolean(template_args.get('lstrip_blocks', False), strict=False)
+ except TypeError as e:
+ raise AnsibleActionFail(to_native(e))
+
+ newline_sequence = template_args.get('newline_sequence', self.DEFAULT_NEWLINE_SEQUENCE)
+ variable_start_string = template_args.get('variable_start_string', None)
+ variable_end_string = template_args.get('variable_end_string', None)
+ block_start_string = template_args.get('block_start_string', None)
+ block_end_string = template_args.get('block_end_string', None)
+ else:
+ raise AnsibleActionFail("Error while reading template file - "
+ "a string or dict for template expected, but got %s instead" % type(template))
+ try:
+ source = self._find_needle('templates', template_path)
+ except AnsibleError as e:
+ raise AnsibleActionFail(to_text(e))
+
+ # Option `lstrip_blocks' was added in Jinja2 version 2.7.
+ if lstrip_blocks:
+ try:
+ import jinja2.defaults
+ except ImportError:
+ raise AnsibleError('Unable to import Jinja2 defaults for determining Jinja2 features.')
+
+ try:
+ jinja2.defaults.LSTRIP_BLOCKS
+ except AttributeError:
+ raise AnsibleError("Option `lstrip_blocks' is only available in Jinja2 versions >=2.7")
+
+ wrong_sequences = ["\\n", "\\r", "\\r\\n"]
+ allowed_sequences = ["\n", "\r", "\r\n"]
+
+ # We need to convert unescaped sequences to proper escaped sequences for Jinja2
+ if newline_sequence in wrong_sequences:
+ newline_sequence = allowed_sequences[wrong_sequences.index(newline_sequence)]
+ elif newline_sequence not in allowed_sequences:
+ raise AnsibleActionFail("newline_sequence needs to be one of: \n, \r or \r\n")
+
+ # Get vault decrypted tmp file
+ try:
+ tmp_source = self._loader.get_real_file(source)
+ except AnsibleFileNotFound as e:
+ raise AnsibleActionFail("could not find template=%s, %s" % (source, to_text(e)))
+ b_tmp_source = to_bytes(tmp_source, errors='surrogate_or_strict')
+
+ # template the source data locally & get ready to transfer
+ try:
+ with open(b_tmp_source, 'rb') as f:
+ try:
+ template_data = to_text(f.read(), errors='surrogate_or_strict')
+ except UnicodeError:
+ raise AnsibleActionFail("Template source files must be utf-8 encoded")
+
+ # add ansible 'template' vars
+ temp_vars = task_vars.copy()
+ old_vars = self._templar.available_variables
+
+ self._templar.environment.newline_sequence = newline_sequence
+ if block_start_string is not None:
+ self._templar.environment.block_start_string = block_start_string
+ if block_end_string is not None:
+ self._templar.environment.block_end_string = block_end_string
+ if variable_start_string is not None:
+ self._templar.environment.variable_start_string = variable_start_string
+ if variable_end_string is not None:
+ self._templar.environment.variable_end_string = variable_end_string
+ self._templar.environment.trim_blocks = trim_blocks
+ self._templar.environment.lstrip_blocks = lstrip_blocks
+ self._templar.available_variables = temp_vars
+ resultant = self._templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False)
+ self._templar.available_variables = old_vars
+ resource_definition = self._task.args.get('definition', None)
+ if not resource_definition:
+ new_module_args.pop('template')
+ new_module_args['definition'] = resultant
+ except AnsibleAction:
+ raise
+ except Exception as e:
+ raise AnsibleActionFail("%s: %s" % (type(e).__name__, to_text(e)))
+ finally:
+ self._loader.cleanup_tmp_file(b_tmp_source)
+
+ # Execute the k8s_* module.
+ module_return = self._execute_module(module_name=self._task.action, module_args=new_module_args, task_vars=task_vars)
+
+ # Delete tmp path
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+
+ result.update(module_return)
+
+ return self._ensure_invocation(result)
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_info.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_info.py
new file mode 100644
index 00000000..6b26225f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_info.py
@@ -0,0 +1,198 @@
+# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright (c) 2017, Toshio Kuratomi <tkuraotmi@ansible.com>
+# Copyright (c) 2020, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import copy
+import traceback
+
+from ansible.config.manager import ensure_type
+from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleAction, AnsibleActionFail
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_text, to_bytes, to_native
+from ansible.plugins.action import ActionBase
+
+
+class ActionModule(ActionBase):
+
+ TRANSFERS_FILES = True
+ DEFAULT_NEWLINE_SEQUENCE = "\n"
+
+ def _ensure_invocation(self, result):
+ # NOTE: adding invocation arguments here needs to be kept in sync with
+ # any no_log specified in the argument_spec in the module.
+ if 'invocation' not in result:
+ if self._play_context.no_log:
+ result['invocation'] = "CENSORED: no_log is set"
+ else:
+ result['invocation'] = self._task.args.copy()
+                result['invocation']['module_args'] = self._task.args.copy()
+
+ return result
+
+ def run(self, tmp=None, task_vars=None):
+ ''' handler for k8s options '''
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ new_module_args = copy.deepcopy(self._task.args)
+ kubeconfig = self._task.args.get('kubeconfig', None)
+ # find the file in the expected search path
+ if kubeconfig:
+ try:
+ # find in expected paths
+ kubeconfig = self._find_needle('files', kubeconfig)
+ except AnsibleError as e:
+ result['failed'] = True
+ result['msg'] = to_text(e)
+ result['exception'] = traceback.format_exc()
+ return result
+
+ if kubeconfig:
+ # decrypt kubeconfig found
+ actual_file = self._loader.get_real_file(kubeconfig, decrypt=True)
+ new_module_args['kubeconfig'] = actual_file
+
+ # find the file in the expected search path
+ src = self._task.args.get('src', None)
+ if src:
+ try:
+ # find in expected paths
+ src = self._find_needle('files', src)
+ except AnsibleError as e:
+ result['failed'] = True
+ result['msg'] = to_text(e)
+ result['exception'] = traceback.format_exc()
+ return result
+
+ if src:
+ new_module_args['src'] = src
+
+ template = self._task.args.get('template', None)
+ if template:
+ # template is only supported by k8s module.
+ if self._task.action not in ('k8s', 'community.kubernetes.k8s', 'community.okd.k8s'):
+ raise AnsibleActionFail("'template' is only supported parameter for 'k8s' module.")
+ if isinstance(template, string_types):
+ # treat this as raw_params
+ template_path = template
+ newline_sequence = self.DEFAULT_NEWLINE_SEQUENCE
+ variable_start_string = None
+ variable_end_string = None
+ block_start_string = None
+ block_end_string = None
+ trim_blocks = True
+ lstrip_blocks = False
+ elif isinstance(template, dict):
+ template_args = template
+ template_path = template_args.get('path', None)
+            if not template_path:
+ raise AnsibleActionFail("Please specify path for template.")
+
+ # Options type validation strings
+ for s_type in ('newline_sequence', 'variable_start_string', 'variable_end_string', 'block_start_string',
+ 'block_end_string'):
+ if s_type in template_args:
+ value = ensure_type(template_args[s_type], 'string')
+ if value is not None and not isinstance(value, string_types):
+ raise AnsibleActionFail("%s is expected to be a string, but got %s instead" % (s_type, type(value)))
+ try:
+ trim_blocks = boolean(template_args.get('trim_blocks', True), strict=False)
+ lstrip_blocks = boolean(template_args.get('lstrip_blocks', False), strict=False)
+ except TypeError as e:
+ raise AnsibleActionFail(to_native(e))
+
+ newline_sequence = template_args.get('newline_sequence', self.DEFAULT_NEWLINE_SEQUENCE)
+ variable_start_string = template_args.get('variable_start_string', None)
+ variable_end_string = template_args.get('variable_end_string', None)
+ block_start_string = template_args.get('block_start_string', None)
+ block_end_string = template_args.get('block_end_string', None)
+ else:
+ raise AnsibleActionFail("Error while reading template file - "
+ "a string or dict for template expected, but got %s instead" % type(template))
+ try:
+ source = self._find_needle('templates', template_path)
+ except AnsibleError as e:
+ raise AnsibleActionFail(to_text(e))
+
+ # Option `lstrip_blocks' was added in Jinja2 version 2.7.
+ if lstrip_blocks:
+ try:
+ import jinja2.defaults
+ except ImportError:
+ raise AnsibleError('Unable to import Jinja2 defaults for determining Jinja2 features.')
+
+ try:
+ jinja2.defaults.LSTRIP_BLOCKS
+ except AttributeError:
+ raise AnsibleError("Option `lstrip_blocks' is only available in Jinja2 versions >=2.7")
+
+ wrong_sequences = ["\\n", "\\r", "\\r\\n"]
+ allowed_sequences = ["\n", "\r", "\r\n"]
+
+ # We need to convert unescaped sequences to proper escaped sequences for Jinja2
+ if newline_sequence in wrong_sequences:
+ newline_sequence = allowed_sequences[wrong_sequences.index(newline_sequence)]
+ elif newline_sequence not in allowed_sequences:
+ raise AnsibleActionFail("newline_sequence needs to be one of: \n, \r or \r\n")
+
+ # Get vault decrypted tmp file
+ try:
+ tmp_source = self._loader.get_real_file(source)
+ except AnsibleFileNotFound as e:
+ raise AnsibleActionFail("could not find template=%s, %s" % (source, to_text(e)))
+ b_tmp_source = to_bytes(tmp_source, errors='surrogate_or_strict')
+
+ # template the source data locally & get ready to transfer
+ try:
+ with open(b_tmp_source, 'rb') as f:
+ try:
+ template_data = to_text(f.read(), errors='surrogate_or_strict')
+ except UnicodeError:
+ raise AnsibleActionFail("Template source files must be utf-8 encoded")
+
+ # add ansible 'template' vars
+ temp_vars = task_vars.copy()
+ old_vars = self._templar.available_variables
+
+ self._templar.environment.newline_sequence = newline_sequence
+ if block_start_string is not None:
+ self._templar.environment.block_start_string = block_start_string
+ if block_end_string is not None:
+ self._templar.environment.block_end_string = block_end_string
+ if variable_start_string is not None:
+ self._templar.environment.variable_start_string = variable_start_string
+ if variable_end_string is not None:
+ self._templar.environment.variable_end_string = variable_end_string
+ self._templar.environment.trim_blocks = trim_blocks
+ self._templar.environment.lstrip_blocks = lstrip_blocks
+ self._templar.available_variables = temp_vars
+ resultant = self._templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False)
+ self._templar.available_variables = old_vars
+ resource_definition = self._task.args.get('definition', None)
+ if not resource_definition:
+ new_module_args.pop('template')
+ new_module_args['definition'] = resultant
+ except AnsibleAction:
+ raise
+ except Exception as e:
+ raise AnsibleActionFail("%s: %s" % (type(e).__name__, to_text(e)))
+ finally:
+ self._loader.cleanup_tmp_file(b_tmp_source)
+
+ # Execute the k8s_* module.
+ module_return = self._execute_module(module_name=self._task.action, module_args=new_module_args, task_vars=task_vars)
+
+ # Delete tmp path
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+
+ result.update(module_return)
+
+ return self._ensure_invocation(result)
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_log.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_log.py
new file mode 100644
index 00000000..6b26225f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_log.py
@@ -0,0 +1,198 @@
+# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright (c) 2017, Toshio Kuratomi <tkuraotmi@ansible.com>
+# Copyright (c) 2020, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import copy
+import traceback
+
+from ansible.config.manager import ensure_type
+from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleAction, AnsibleActionFail
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_text, to_bytes, to_native
+from ansible.plugins.action import ActionBase
+
+
+class ActionModule(ActionBase):
+
+ TRANSFERS_FILES = True
+ DEFAULT_NEWLINE_SEQUENCE = "\n"
+
+ def _ensure_invocation(self, result):
+ # NOTE: adding invocation arguments here needs to be kept in sync with
+ # any no_log specified in the argument_spec in the module.
+ if 'invocation' not in result:
+ if self._play_context.no_log:
+ result['invocation'] = "CENSORED: no_log is set"
+ else:
+ result['invocation'] = self._task.args.copy()
+                result['invocation']['module_args'] = self._task.args.copy()
+
+ return result
+
+ def run(self, tmp=None, task_vars=None):
+ ''' handler for k8s options '''
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ new_module_args = copy.deepcopy(self._task.args)
+ kubeconfig = self._task.args.get('kubeconfig', None)
+ # find the file in the expected search path
+ if kubeconfig:
+ try:
+ # find in expected paths
+ kubeconfig = self._find_needle('files', kubeconfig)
+ except AnsibleError as e:
+ result['failed'] = True
+ result['msg'] = to_text(e)
+ result['exception'] = traceback.format_exc()
+ return result
+
+ if kubeconfig:
+ # decrypt kubeconfig found
+ actual_file = self._loader.get_real_file(kubeconfig, decrypt=True)
+ new_module_args['kubeconfig'] = actual_file
+
+ # find the file in the expected search path
+ src = self._task.args.get('src', None)
+ if src:
+ try:
+ # find in expected paths
+ src = self._find_needle('files', src)
+ except AnsibleError as e:
+ result['failed'] = True
+ result['msg'] = to_text(e)
+ result['exception'] = traceback.format_exc()
+ return result
+
+ if src:
+ new_module_args['src'] = src
+
+ template = self._task.args.get('template', None)
+ if template:
+ # template is only supported by k8s module.
+ if self._task.action not in ('k8s', 'community.kubernetes.k8s', 'community.okd.k8s'):
+ raise AnsibleActionFail("'template' is only supported parameter for 'k8s' module.")
+ if isinstance(template, string_types):
+ # treat this as raw_params
+ template_path = template
+ newline_sequence = self.DEFAULT_NEWLINE_SEQUENCE
+ variable_start_string = None
+ variable_end_string = None
+ block_start_string = None
+ block_end_string = None
+ trim_blocks = True
+ lstrip_blocks = False
+ elif isinstance(template, dict):
+ template_args = template
+ template_path = template_args.get('path', None)
+            if not template_path:
+ raise AnsibleActionFail("Please specify path for template.")
+
+ # Options type validation strings
+ for s_type in ('newline_sequence', 'variable_start_string', 'variable_end_string', 'block_start_string',
+ 'block_end_string'):
+ if s_type in template_args:
+ value = ensure_type(template_args[s_type], 'string')
+ if value is not None and not isinstance(value, string_types):
+ raise AnsibleActionFail("%s is expected to be a string, but got %s instead" % (s_type, type(value)))
+ try:
+ trim_blocks = boolean(template_args.get('trim_blocks', True), strict=False)
+ lstrip_blocks = boolean(template_args.get('lstrip_blocks', False), strict=False)
+ except TypeError as e:
+ raise AnsibleActionFail(to_native(e))
+
+ newline_sequence = template_args.get('newline_sequence', self.DEFAULT_NEWLINE_SEQUENCE)
+ variable_start_string = template_args.get('variable_start_string', None)
+ variable_end_string = template_args.get('variable_end_string', None)
+ block_start_string = template_args.get('block_start_string', None)
+ block_end_string = template_args.get('block_end_string', None)
+ else:
+ raise AnsibleActionFail("Error while reading template file - "
+ "a string or dict for template expected, but got %s instead" % type(template))
+ try:
+ source = self._find_needle('templates', template_path)
+ except AnsibleError as e:
+ raise AnsibleActionFail(to_text(e))
+
+ # Option `lstrip_blocks' was added in Jinja2 version 2.7.
+ if lstrip_blocks:
+ try:
+ import jinja2.defaults
+ except ImportError:
+ raise AnsibleError('Unable to import Jinja2 defaults for determining Jinja2 features.')
+
+ try:
+ jinja2.defaults.LSTRIP_BLOCKS
+ except AttributeError:
+ raise AnsibleError("Option `lstrip_blocks' is only available in Jinja2 versions >=2.7")
+
+ wrong_sequences = ["\\n", "\\r", "\\r\\n"]
+ allowed_sequences = ["\n", "\r", "\r\n"]
+
+ # We need to convert unescaped sequences to proper escaped sequences for Jinja2
+ if newline_sequence in wrong_sequences:
+ newline_sequence = allowed_sequences[wrong_sequences.index(newline_sequence)]
+ elif newline_sequence not in allowed_sequences:
+ raise AnsibleActionFail("newline_sequence needs to be one of: \n, \r or \r\n")
+
+ # Get vault decrypted tmp file
+ try:
+ tmp_source = self._loader.get_real_file(source)
+ except AnsibleFileNotFound as e:
+ raise AnsibleActionFail("could not find template=%s, %s" % (source, to_text(e)))
+ b_tmp_source = to_bytes(tmp_source, errors='surrogate_or_strict')
+
+ # template the source data locally & get ready to transfer
+ try:
+ with open(b_tmp_source, 'rb') as f:
+ try:
+ template_data = to_text(f.read(), errors='surrogate_or_strict')
+ except UnicodeError:
+ raise AnsibleActionFail("Template source files must be utf-8 encoded")
+
+ # add ansible 'template' vars
+ temp_vars = task_vars.copy()
+ old_vars = self._templar.available_variables
+
+ self._templar.environment.newline_sequence = newline_sequence
+ if block_start_string is not None:
+ self._templar.environment.block_start_string = block_start_string
+ if block_end_string is not None:
+ self._templar.environment.block_end_string = block_end_string
+ if variable_start_string is not None:
+ self._templar.environment.variable_start_string = variable_start_string
+ if variable_end_string is not None:
+ self._templar.environment.variable_end_string = variable_end_string
+ self._templar.environment.trim_blocks = trim_blocks
+ self._templar.environment.lstrip_blocks = lstrip_blocks
+ self._templar.available_variables = temp_vars
+ resultant = self._templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False)
+ self._templar.available_variables = old_vars
+ resource_definition = self._task.args.get('definition', None)
+ if not resource_definition:
+ new_module_args.pop('template')
+ new_module_args['definition'] = resultant
+ except AnsibleAction:
+ raise
+ except Exception as e:
+ raise AnsibleActionFail("%s: %s" % (type(e).__name__, to_text(e)))
+ finally:
+ self._loader.cleanup_tmp_file(b_tmp_source)
+
+ # Execute the k8s_* module.
+ module_return = self._execute_module(module_name=self._task.action, module_args=new_module_args, task_vars=task_vars)
+
+ # Delete tmp path
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+
+ result.update(module_return)
+
+ return self._ensure_invocation(result)
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_scale.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_scale.py
new file mode 100644
index 00000000..6b26225f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_scale.py
@@ -0,0 +1,198 @@
+# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright (c) 2017, Toshio Kuratomi <tkuraotmi@ansible.com>
+# Copyright (c) 2020, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import copy
+import traceback
+
+from ansible.config.manager import ensure_type
+from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleAction, AnsibleActionFail
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_text, to_bytes, to_native
+from ansible.plugins.action import ActionBase
+
+
+class ActionModule(ActionBase):
+
+ TRANSFERS_FILES = True
+ DEFAULT_NEWLINE_SEQUENCE = "\n"
+
+ def _ensure_invocation(self, result):
+ # NOTE: adding invocation arguments here needs to be kept in sync with
+ # any no_log specified in the argument_spec in the module.
+ if 'invocation' not in result:
+ if self._play_context.no_log:
+ result['invocation'] = "CENSORED: no_log is set"
+ else:
+ result['invocation'] = self._task.args.copy()
+                result['invocation']['module_args'] = self._task.args.copy()
+
+ return result
+
+ def run(self, tmp=None, task_vars=None):
+ ''' handler for k8s options '''
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ new_module_args = copy.deepcopy(self._task.args)
+ kubeconfig = self._task.args.get('kubeconfig', None)
+ # find the file in the expected search path
+ if kubeconfig:
+ try:
+ # find in expected paths
+ kubeconfig = self._find_needle('files', kubeconfig)
+ except AnsibleError as e:
+ result['failed'] = True
+ result['msg'] = to_text(e)
+ result['exception'] = traceback.format_exc()
+ return result
+
+ if kubeconfig:
+ # decrypt kubeconfig found
+ actual_file = self._loader.get_real_file(kubeconfig, decrypt=True)
+ new_module_args['kubeconfig'] = actual_file
+
+ # find the file in the expected search path
+ src = self._task.args.get('src', None)
+ if src:
+ try:
+ # find in expected paths
+ src = self._find_needle('files', src)
+ except AnsibleError as e:
+ result['failed'] = True
+ result['msg'] = to_text(e)
+ result['exception'] = traceback.format_exc()
+ return result
+
+ if src:
+ new_module_args['src'] = src
+
+ template = self._task.args.get('template', None)
+ if template:
+ # template is only supported by k8s module.
+ if self._task.action not in ('k8s', 'community.kubernetes.k8s', 'community.okd.k8s'):
+ raise AnsibleActionFail("'template' is only supported parameter for 'k8s' module.")
+ if isinstance(template, string_types):
+ # treat this as raw_params
+ template_path = template
+ newline_sequence = self.DEFAULT_NEWLINE_SEQUENCE
+ variable_start_string = None
+ variable_end_string = None
+ block_start_string = None
+ block_end_string = None
+ trim_blocks = True
+ lstrip_blocks = False
+ elif isinstance(template, dict):
+ template_args = template
+ template_path = template_args.get('path', None)
+            if not template_path:
+ raise AnsibleActionFail("Please specify path for template.")
+
+ # Options type validation strings
+ for s_type in ('newline_sequence', 'variable_start_string', 'variable_end_string', 'block_start_string',
+ 'block_end_string'):
+ if s_type in template_args:
+ value = ensure_type(template_args[s_type], 'string')
+ if value is not None and not isinstance(value, string_types):
+ raise AnsibleActionFail("%s is expected to be a string, but got %s instead" % (s_type, type(value)))
+ try:
+ trim_blocks = boolean(template_args.get('trim_blocks', True), strict=False)
+ lstrip_blocks = boolean(template_args.get('lstrip_blocks', False), strict=False)
+ except TypeError as e:
+ raise AnsibleActionFail(to_native(e))
+
+ newline_sequence = template_args.get('newline_sequence', self.DEFAULT_NEWLINE_SEQUENCE)
+ variable_start_string = template_args.get('variable_start_string', None)
+ variable_end_string = template_args.get('variable_end_string', None)
+ block_start_string = template_args.get('block_start_string', None)
+ block_end_string = template_args.get('block_end_string', None)
+ else:
+ raise AnsibleActionFail("Error while reading template file - "
+ "a string or dict for template expected, but got %s instead" % type(template))
+ try:
+ source = self._find_needle('templates', template_path)
+ except AnsibleError as e:
+ raise AnsibleActionFail(to_text(e))
+
+ # Option `lstrip_blocks' was added in Jinja2 version 2.7.
+ if lstrip_blocks:
+ try:
+ import jinja2.defaults
+ except ImportError:
+ raise AnsibleError('Unable to import Jinja2 defaults for determining Jinja2 features.')
+
+ try:
+ jinja2.defaults.LSTRIP_BLOCKS
+ except AttributeError:
+ raise AnsibleError("Option `lstrip_blocks' is only available in Jinja2 versions >=2.7")
+
+ wrong_sequences = ["\\n", "\\r", "\\r\\n"]
+ allowed_sequences = ["\n", "\r", "\r\n"]
+
+ # We need to convert unescaped sequences to proper escaped sequences for Jinja2
+ if newline_sequence in wrong_sequences:
+ newline_sequence = allowed_sequences[wrong_sequences.index(newline_sequence)]
+ elif newline_sequence not in allowed_sequences:
+ raise AnsibleActionFail("newline_sequence needs to be one of: \n, \r or \r\n")
+
+ # Get vault decrypted tmp file
+ try:
+ tmp_source = self._loader.get_real_file(source)
+ except AnsibleFileNotFound as e:
+ raise AnsibleActionFail("could not find template=%s, %s" % (source, to_text(e)))
+ b_tmp_source = to_bytes(tmp_source, errors='surrogate_or_strict')
+
+ # template the source data locally & get ready to transfer
+ try:
+ with open(b_tmp_source, 'rb') as f:
+ try:
+ template_data = to_text(f.read(), errors='surrogate_or_strict')
+ except UnicodeError:
+ raise AnsibleActionFail("Template source files must be utf-8 encoded")
+
+ # add ansible 'template' vars
+ temp_vars = task_vars.copy()
+ old_vars = self._templar.available_variables
+
+ self._templar.environment.newline_sequence = newline_sequence
+ if block_start_string is not None:
+ self._templar.environment.block_start_string = block_start_string
+ if block_end_string is not None:
+ self._templar.environment.block_end_string = block_end_string
+ if variable_start_string is not None:
+ self._templar.environment.variable_start_string = variable_start_string
+ if variable_end_string is not None:
+ self._templar.environment.variable_end_string = variable_end_string
+ self._templar.environment.trim_blocks = trim_blocks
+ self._templar.environment.lstrip_blocks = lstrip_blocks
+ self._templar.available_variables = temp_vars
+ resultant = self._templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False)
+ self._templar.available_variables = old_vars
+ resource_definition = self._task.args.get('definition', None)
+ if not resource_definition:
+ new_module_args.pop('template')
+ new_module_args['definition'] = resultant
+ except AnsibleAction:
+ raise
+ except Exception as e:
+ raise AnsibleActionFail("%s: %s" % (type(e).__name__, to_text(e)))
+ finally:
+ self._loader.cleanup_tmp_file(b_tmp_source)
+
+ # Execute the k8s_* module.
+ module_return = self._execute_module(module_name=self._task.action, module_args=new_module_args, task_vars=task_vars)
+
+ # Delete tmp path
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+
+ result.update(module_return)
+
+ return self._ensure_invocation(result)
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_service.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_service.py
new file mode 100644
index 00000000..6b26225f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_service.py
@@ -0,0 +1,198 @@
+# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright (c) 2017, Toshio Kuratomi <tkuraotmi@ansible.com>
+# Copyright (c) 2020, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import copy
+import traceback
+
+from ansible.config.manager import ensure_type
+from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleAction, AnsibleActionFail
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_text, to_bytes, to_native
+from ansible.plugins.action import ActionBase
+
+
+class ActionModule(ActionBase):
+
+    TRANSFERS_FILES = True
+    DEFAULT_NEWLINE_SEQUENCE = "\n"
+
+    def _ensure_invocation(self, result):
+        # NOTE: adding invocation arguments here needs to be kept in sync with
+        # any no_log specified in the argument_spec in the module.
+        if 'invocation' not in result:
+            if self._play_context.no_log:
+                result['invocation'] = "CENSORED: no_log is set"
+            else:
+                result['invocation'] = self._task.args.copy()
+                result['invocation']['module_args'] = self._task.args.copy()
+
+        return result
+
+    def run(self, tmp=None, task_vars=None):
+        ''' handler for k8s options: resolve kubeconfig/src/template on the controller, then run the module '''
+        if task_vars is None:
+            task_vars = dict()
+
+        result = super(ActionModule, self).run(tmp, task_vars)
+        del tmp  # tmp no longer has any effect
+
+        new_module_args = copy.deepcopy(self._task.args)
+        kubeconfig = self._task.args.get('kubeconfig', None)
+        # find the file in the expected search path
+        if kubeconfig:
+            try:
+                # find in expected paths
+                kubeconfig = self._find_needle('files', kubeconfig)
+            except AnsibleError as e:
+                result['failed'] = True
+                result['msg'] = to_text(e)
+                result['exception'] = traceback.format_exc()
+                return result
+
+        if kubeconfig:
+            # decrypt kubeconfig found
+            actual_file = self._loader.get_real_file(kubeconfig, decrypt=True)
+            new_module_args['kubeconfig'] = actual_file
+
+        # find the file in the expected search path
+        src = self._task.args.get('src', None)
+        if src:
+            try:
+                # find in expected paths
+                src = self._find_needle('files', src)
+            except AnsibleError as e:
+                result['failed'] = True
+                result['msg'] = to_text(e)
+                result['exception'] = traceback.format_exc()
+                return result
+
+        if src:
+            new_module_args['src'] = src
+
+        template = self._task.args.get('template', None)
+        if template:
+            # template is only supported by k8s module.
+            if self._task.action not in ('k8s', 'community.kubernetes.k8s', 'community.okd.k8s'):
+                raise AnsibleActionFail("'template' is only supported parameter for 'k8s' module.")
+            if isinstance(template, string_types):
+                # treat this as raw_params
+                template_path = template
+                newline_sequence = self.DEFAULT_NEWLINE_SEQUENCE
+                variable_start_string = None
+                variable_end_string = None
+                block_start_string = None
+                block_end_string = None
+                trim_blocks = True
+                lstrip_blocks = False
+            elif isinstance(template, dict):
+                template_args = template
+                template_path = template_args.get('path', None)
+                if not template_path:
+                    raise AnsibleActionFail("Please specify path for template.")
+
+                # Options type validation strings
+                for s_type in ('newline_sequence', 'variable_start_string', 'variable_end_string', 'block_start_string',
+                               'block_end_string'):
+                    if s_type in template_args:
+                        value = ensure_type(template_args[s_type], 'string')
+                        if value is not None and not isinstance(value, string_types):
+                            raise AnsibleActionFail("%s is expected to be a string, but got %s instead" % (s_type, type(value)))
+                try:
+                    trim_blocks = boolean(template_args.get('trim_blocks', True), strict=False)
+                    lstrip_blocks = boolean(template_args.get('lstrip_blocks', False), strict=False)
+                except TypeError as e:
+                    raise AnsibleActionFail(to_native(e))
+
+                newline_sequence = template_args.get('newline_sequence', self.DEFAULT_NEWLINE_SEQUENCE)
+                variable_start_string = template_args.get('variable_start_string', None)
+                variable_end_string = template_args.get('variable_end_string', None)
+                block_start_string = template_args.get('block_start_string', None)
+                block_end_string = template_args.get('block_end_string', None)
+            else:
+                raise AnsibleActionFail("Error while reading template file - "
+                                        "a string or dict for template expected, but got %s instead" % type(template))
+            try:
+                source = self._find_needle('templates', template_path)
+            except AnsibleError as e:
+                raise AnsibleActionFail(to_text(e))
+
+            # Option `lstrip_blocks' was added in Jinja2 version 2.7.
+            if lstrip_blocks:
+                try:
+                    import jinja2.defaults
+                except ImportError:
+                    raise AnsibleError('Unable to import Jinja2 defaults for determining Jinja2 features.')
+
+                try:
+                    jinja2.defaults.LSTRIP_BLOCKS
+                except AttributeError:
+                    raise AnsibleError("Option `lstrip_blocks' is only available in Jinja2 versions >=2.7")
+
+            wrong_sequences = ["\\n", "\\r", "\\r\\n"]
+            allowed_sequences = ["\n", "\r", "\r\n"]
+
+            # We need to convert unescaped sequences to proper escaped sequences for Jinja2
+            if newline_sequence in wrong_sequences:
+                newline_sequence = allowed_sequences[wrong_sequences.index(newline_sequence)]
+            elif newline_sequence not in allowed_sequences:
+                raise AnsibleActionFail("newline_sequence needs to be one of: \n, \r or \r\n")
+
+            # Get vault decrypted tmp file
+            try:
+                tmp_source = self._loader.get_real_file(source)
+            except AnsibleFileNotFound as e:
+                raise AnsibleActionFail("could not find template=%s, %s" % (source, to_text(e)))
+            b_tmp_source = to_bytes(tmp_source, errors='surrogate_or_strict')
+
+            # template the source data locally & get ready to transfer
+            try:
+                with open(b_tmp_source, 'rb') as f:
+                    try:
+                        template_data = to_text(f.read(), errors='surrogate_or_strict')
+                    except UnicodeError:
+                        raise AnsibleActionFail("Template source files must be utf-8 encoded")
+
+                # add ansible 'template' vars
+                temp_vars = task_vars.copy()
+                old_vars = self._templar.available_variables
+
+                self._templar.environment.newline_sequence = newline_sequence
+                if block_start_string is not None:
+                    self._templar.environment.block_start_string = block_start_string
+                if block_end_string is not None:
+                    self._templar.environment.block_end_string = block_end_string
+                if variable_start_string is not None:
+                    self._templar.environment.variable_start_string = variable_start_string
+                if variable_end_string is not None:
+                    self._templar.environment.variable_end_string = variable_end_string
+                self._templar.environment.trim_blocks = trim_blocks
+                self._templar.environment.lstrip_blocks = lstrip_blocks
+                self._templar.available_variables = temp_vars
+                resultant = self._templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False)
+                self._templar.available_variables = old_vars
+                resource_definition = self._task.args.get('definition', None)
+                if not resource_definition:
+                    new_module_args.pop('template')
+                new_module_args['definition'] = resultant
+            except AnsibleAction:
+                raise
+            except Exception as e:
+                raise AnsibleActionFail("%s: %s" % (type(e).__name__, to_text(e)))
+            finally:
+                self._loader.cleanup_tmp_file(b_tmp_source)
+
+        # Execute the k8s_* module.
+        module_return = self._execute_module(module_name=self._task.action, module_args=new_module_args, task_vars=task_vars)
+
+        # Delete tmp path
+        self._remove_tmp_path(self._connection._shell.tmpdir)
+
+        result.update(module_return)
+
+        return self._ensure_invocation(result)
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/connection/kubectl.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/connection/kubectl.py
new file mode 100644
index 00000000..5c16fe9e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/connection/kubectl.py
@@ -0,0 +1,369 @@
+# Based on the docker connection plugin
+#
+# Connection plugin for configuring kubernetes containers with kubectl
+# (c) 2017, XuXinkun <xuxinkun@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+ author:
+ - xuxinkun
+
+ connection: kubectl
+
+ short_description: Execute tasks in pods running on Kubernetes.
+
+ description:
+ - Use the kubectl exec command to run tasks in, or put/fetch files to, pods running on the Kubernetes
+ container platform.
+
+ requirements:
+ - kubectl (go binary)
+
+ options:
+ kubectl_pod:
+ description:
+ - Pod name.
+ - Required when the host name does not match pod name.
+ default: ''
+ vars:
+ - name: ansible_kubectl_pod
+ env:
+ - name: K8S_AUTH_POD
+ kubectl_container:
+ description:
+ - Container name.
+ - Required when a pod contains more than one container.
+ default: ''
+ vars:
+ - name: ansible_kubectl_container
+ env:
+ - name: K8S_AUTH_CONTAINER
+ kubectl_namespace:
+ description:
+ - The namespace of the pod
+ default: ''
+ vars:
+ - name: ansible_kubectl_namespace
+ env:
+ - name: K8S_AUTH_NAMESPACE
+ kubectl_extra_args:
+ description:
+ - Extra arguments to pass to the kubectl command line.
+ - Please be aware that this passes information directly on the command line and it could expose sensitive data.
+ default: ''
+ vars:
+ - name: ansible_kubectl_extra_args
+ env:
+ - name: K8S_AUTH_EXTRA_ARGS
+ kubectl_kubeconfig:
+ description:
+ - Path to a kubectl config file. Defaults to I(~/.kube/config)
+ default: ''
+ vars:
+ - name: ansible_kubectl_kubeconfig
+ - name: ansible_kubectl_config
+ env:
+ - name: K8S_AUTH_KUBECONFIG
+ kubectl_context:
+ description:
+ - The name of a context found in the K8s config file.
+ default: ''
+ vars:
+ - name: ansible_kubectl_context
+ env:
+ - name: K8S_AUTH_CONTEXT
+ kubectl_host:
+ description:
+ - URL for accessing the API.
+ default: ''
+ vars:
+ - name: ansible_kubectl_host
+ - name: ansible_kubectl_server
+ env:
+ - name: K8S_AUTH_HOST
+ - name: K8S_AUTH_SERVER
+ kubectl_username:
+ description:
+ - Provide a username for authenticating with the API.
+ default: ''
+ vars:
+ - name: ansible_kubectl_username
+ - name: ansible_kubectl_user
+ env:
+ - name: K8S_AUTH_USERNAME
+ kubectl_password:
+ description:
+ - Provide a password for authenticating with the API.
+ - Please be aware that this passes information directly on the command line and it could expose sensitive data.
+ We recommend using the file based authentication options instead.
+ default: ''
+ vars:
+ - name: ansible_kubectl_password
+ env:
+ - name: K8S_AUTH_PASSWORD
+ kubectl_token:
+ description:
+ - API authentication bearer token.
+ - Please be aware that this passes information directly on the command line and it could expose sensitive data.
+ We recommend using the file based authentication options instead.
+ vars:
+ - name: ansible_kubectl_token
+ - name: ansible_kubectl_api_key
+ env:
+ - name: K8S_AUTH_TOKEN
+ - name: K8S_AUTH_API_KEY
+ client_cert:
+ description:
+ - Path to a certificate used to authenticate with the API.
+ default: ''
+ vars:
+ - name: ansible_kubectl_cert_file
+ - name: ansible_kubectl_client_cert
+ env:
+ - name: K8S_AUTH_CERT_FILE
+ aliases: [ kubectl_cert_file ]
+ client_key:
+ description:
+ - Path to a key file used to authenticate with the API.
+ default: ''
+ vars:
+ - name: ansible_kubectl_key_file
+ - name: ansible_kubectl_client_key
+ env:
+ - name: K8S_AUTH_KEY_FILE
+ aliases: [ kubectl_key_file ]
+ ca_cert:
+ description:
+ - Path to a CA certificate used to authenticate with the API.
+ default: ''
+ vars:
+ - name: ansible_kubectl_ssl_ca_cert
+ - name: ansible_kubectl_ca_cert
+ env:
+ - name: K8S_AUTH_SSL_CA_CERT
+ aliases: [ kubectl_ssl_ca_cert ]
+ validate_certs:
+ description:
+ - Whether or not to verify the API server's SSL certificate. Defaults to I(true).
+ default: ''
+ vars:
+ - name: ansible_kubectl_verify_ssl
+ - name: ansible_kubectl_validate_certs
+ env:
+ - name: K8S_AUTH_VERIFY_SSL
+ aliases: [ kubectl_verify_ssl ]
+"""
+
+import distutils.spawn
+import os
+import os.path
+import subprocess
+
+from ansible.parsing.yaml.loader import AnsibleLoader
+from ansible.errors import AnsibleError, AnsibleFileNotFound
+from ansible.module_utils.six.moves import shlex_quote
+from ansible.module_utils._text import to_bytes
+from ansible.plugins.connection import ConnectionBase, BUFSIZE
+from ansible.utils.display import Display
+
+display = Display()
+
+
+CONNECTION_TRANSPORT = 'kubectl'
+
+CONNECTION_OPTIONS = {
+ 'kubectl_container': '-c',
+ 'kubectl_namespace': '-n',
+ 'kubectl_kubeconfig': '--kubeconfig',
+ 'kubectl_context': '--context',
+ 'kubectl_host': '--server',
+ 'kubectl_username': '--username',
+ 'kubectl_password': '--password',
+ 'client_cert': '--client-certificate',
+ 'client_key': '--client-key',
+ 'ca_cert': '--certificate-authority',
+ 'validate_certs': '--insecure-skip-tls-verify',
+ 'kubectl_token': '--token'
+}
+
+
+class Connection(ConnectionBase):
+    ''' Local connection that proxies commands and file transfers into a pod via the kubectl binary '''
+
+    transport = CONNECTION_TRANSPORT
+    connection_options = CONNECTION_OPTIONS
+    documentation = DOCUMENTATION
+    has_pipelining = True
+    transport_cmd = None
+
+    def __init__(self, play_context, new_stdin, *args, **kwargs):
+        super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
+        # Note: kubectl runs commands as the user that started the container.
+        # It is impossible to set the remote user for a kubectl connection.
+        cmd_arg = '{0}_command'.format(self.transport)
+        if cmd_arg in kwargs:
+            self.transport_cmd = kwargs[cmd_arg]
+        else:
+            self.transport_cmd = distutils.spawn.find_executable(self.transport)  # NOTE(review): distutils is deprecated; shutil.which() is the modern equivalent
+            if not self.transport_cmd:
+                raise AnsibleError("{0} command not found in PATH".format(self.transport))
+
+    def _build_exec_cmd(self, cmd):
+        """ Build the local kubectl exec command to run cmd on remote_host
+        """
+        local_cmd = [self.transport_cmd]
+        censored_local_cmd = [self.transport_cmd]
+
+        # Build CLI options from the connection options declared in the DOCUMENTATION string above
+        doc_yaml = AnsibleLoader(self.documentation).get_single_data()
+        for key in doc_yaml.get('options'):
+            if key.endswith('verify_ssl') and self.get_option(key) != '':
+                # Translate verify_ssl to skip_verify_ssl, and output as string
+                skip_verify_ssl = not self.get_option(key)
+                local_cmd.append(u'{0}={1}'.format(self.connection_options[key], str(skip_verify_ssl).lower()))
+                censored_local_cmd.append(u'{0}={1}'.format(self.connection_options[key], str(skip_verify_ssl).lower()))
+            elif not key.endswith('container') and self.get_option(key) and self.connection_options.get(key):
+                cmd_arg = self.connection_options[key]
+                local_cmd += [cmd_arg, self.get_option(key)]
+                # Redact password and token from console log; NOTE(review): non-secret values are not appended to censored_local_cmd here
+                if key.endswith(('_token', '_password')):
+                    censored_local_cmd += [cmd_arg, '********']
+
+        extra_args_name = u'{0}_extra_args'.format(self.transport)
+        if self.get_option(extra_args_name):
+            local_cmd += self.get_option(extra_args_name).split(' ')
+            censored_local_cmd += self.get_option(extra_args_name).split(' ')
+
+        pod = self.get_option(u'{0}_pod'.format(self.transport))
+        if not pod:
+            pod = self._play_context.remote_addr
+        # -i is needed to keep stdin open which allows pipelining to work
+        local_cmd += ['exec', '-i', pod]
+        censored_local_cmd += ['exec', '-i', pod]
+
+        # if the pod has more than one container, then container is required
+        container_arg_name = u'{0}_container'.format(self.transport)
+        if self.get_option(container_arg_name):
+            local_cmd += ['-c', self.get_option(container_arg_name)]
+            censored_local_cmd += ['-c', self.get_option(container_arg_name)]
+
+        local_cmd += ['--'] + cmd
+        censored_local_cmd += ['--'] + cmd
+
+        return local_cmd, censored_local_cmd
+
+    def _connect(self, port=None):
+        """ Mark the connection established; kubectl spawns a fresh process per command, so there is no session to open """
+        super(Connection, self)._connect()
+        if not self._connected:
+            display.vvv(u"ESTABLISH {0} CONNECTION".format(self.transport), host=self._play_context.remote_addr)
+            self._connected = True
+
+    def exec_command(self, cmd, in_data=None, sudoable=False):
+        """ Run a command in the container """
+        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+        local_cmd, censored_local_cmd = self._build_exec_cmd([self._play_context.executable, '-c', cmd])
+
+        display.vvv("EXEC %s" % (censored_local_cmd,), host=self._play_context.remote_addr)
+        local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
+        p = subprocess.Popen(local_cmd, shell=False, stdin=subprocess.PIPE,
+                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+        stdout, stderr = p.communicate(in_data)
+        return (p.returncode, stdout, stderr)
+
+    def _prefix_login_path(self, remote_path):
+        ''' Make sure that we put files into a standard path
+
+            If a path is relative, then we need to choose where to put it.
+            ssh chooses $HOME but we aren't guaranteed that a home dir will
+            exist in any given chroot. So for now we're choosing "/" instead.
+            This also happens to be the former default.
+
+            Can revisit using $HOME instead if it's a problem
+        '''
+        if not remote_path.startswith(os.path.sep):
+            remote_path = os.path.join(os.path.sep, remote_path)
+        return os.path.normpath(remote_path)
+
+    def put_file(self, in_path, out_path):
+        """ Transfer a file from local to the container """
+        super(Connection, self).put_file(in_path, out_path)
+        display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
+
+        out_path = self._prefix_login_path(out_path)
+        if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
+            raise AnsibleFileNotFound(
+                "file or module does not exist: %s" % in_path)
+
+        out_path = shlex_quote(out_path)
+        # kubectl doesn't have native support for copying files into
+        # running containers, so we use kubectl exec to implement this
+        with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
+            if not os.fstat(in_file.fileno()).st_size:
+                count = ' count=0'
+            else:
+                count = ''
+            args, dummy = self._build_exec_cmd([self._play_context.executable, "-c", "dd of=%s bs=%s%s" % (out_path, BUFSIZE, count)])
+            args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
+            try:
+                p = subprocess.Popen(args, stdin=in_file,
+                                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+            except OSError:
+                raise AnsibleError("kubectl connection requires dd command in the container to put files")
+            stdout, stderr = p.communicate()
+
+            if p.returncode != 0:
+                raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+
+    def fetch_file(self, in_path, out_path):
+        """ Fetch a file from container to local. """
+        super(Connection, self).fetch_file(in_path, out_path)
+        display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
+
+        in_path = self._prefix_login_path(in_path)
+        out_dir = os.path.dirname(out_path)
+
+        # kubectl doesn't have native support for fetching files from
+        # running containers, so we use kubectl exec to implement this
+        args, dummy = self._build_exec_cmd([self._play_context.executable, "-c", "dd if=%s bs=%s" % (in_path, BUFSIZE)])
+        args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
+        actual_out_path = os.path.join(out_dir, os.path.basename(in_path))
+        with open(to_bytes(actual_out_path, errors='surrogate_or_strict'), 'wb') as out_file:
+            try:
+                p = subprocess.Popen(args, stdin=subprocess.PIPE,
+                                     stdout=out_file, stderr=subprocess.PIPE)
+            except OSError:
+                raise AnsibleError(
+                    "{0} connection requires dd command in the container to fetch files".format(self.transport)
+                )
+            stdout, stderr = p.communicate()
+
+        if p.returncode != 0:
+            raise AnsibleError("failed to fetch file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+
+        if actual_out_path != out_path:
+            os.rename(to_bytes(actual_out_path, errors='strict'), to_bytes(out_path, errors='strict'))
+
+    def close(self):
+        """ Close the connection; kubectl holds no persistent state to tear down """
+        super(Connection, self).close()
+        self._connected = False
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/__init__.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/helm_common_options.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/helm_common_options.py
new file mode 100644
index 00000000..f13bc1e3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/helm_common_options.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Ansible Project
+# Copyright: (c) 2020, Red Hat Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Doc fragment: CLI-related options shared by every Helm module (binary path, context, kubeconfig).
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+    DOCUMENTATION = r'''
+options:
+  binary_path:
+    description:
+    - The path of a helm binary to use.
+    required: false
+    type: path
+  context:
+    description:
+    - Helm option to specify which kubeconfig context to use.
+    - If the value is not specified in the task, the value of environment variable C(K8S_AUTH_CONTEXT) will be used instead.
+    type: str
+    aliases: [ kube_context ]
+  kubeconfig:
+    description:
+    - Helm option to specify kubeconfig path to use.
+    - If the value is not specified in the task, the value of environment variable C(K8S_AUTH_KUBECONFIG) will be used instead.
+    type: path
+    aliases: [ kubeconfig_path ]
+'''
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_auth_options.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_auth_options.py
new file mode 100644
index 00000000..053caed2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_auth_options.py
@@ -0,0 +1,97 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Doc fragment: options for authenticating with the Kubernetes API (each also settable via a K8S_AUTH_* env var).
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+    DOCUMENTATION = r'''
+options:
+  host:
+    description:
+    - Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable.
+    type: str
+  api_key:
+    description:
+    - Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment variable.
+    type: str
+  kubeconfig:
+    description:
+    - Path to an existing Kubernetes config file. If not provided, and no other connection
+      options are provided, the openshift client will attempt to load the default
+      configuration file from I(~/.kube/config). Can also be specified via K8S_AUTH_KUBECONFIG environment
+      variable.
+    type: path
+  context:
+    description:
+    - The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment variable.
+    type: str
+  username:
+    description:
+    - Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME environment
+      variable.
+    - Please note that this only works with clusters configured to use HTTP Basic Auth. If your cluster has a
+      different form of authentication (e.g. OAuth2 in OpenShift), this option will not work as expected and you
+      should look into the C(k8s_auth) module, as that might do what you need.
+    type: str
+  password:
+    description:
+    - Provide a password for authenticating with the API. Can also be specified via K8S_AUTH_PASSWORD environment
+      variable.
+    - Please read the description of the C(username) option for a discussion of when this option is applicable.
+    type: str
+  client_cert:
+    description:
+    - Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE environment
+      variable.
+    type: path
+    aliases: [ cert_file ]
+  client_key:
+    description:
+    - Path to a key file used to authenticate with the API. Can also be specified via K8S_AUTH_KEY_FILE environment
+      variable.
+    type: path
+    aliases: [ key_file ]
+  ca_cert:
+    description:
+    - Path to a CA certificate used to authenticate with the API. The full certificate chain must be provided to
+      avoid certificate validation errors. Can also be specified via K8S_AUTH_SSL_CA_CERT environment variable.
+    type: path
+    aliases: [ ssl_ca_cert ]
+  validate_certs:
+    description:
+    - Whether or not to verify the API server's SSL certificates. Can also be specified via K8S_AUTH_VERIFY_SSL
+      environment variable.
+    type: bool
+    aliases: [ verify_ssl ]
+  proxy:
+    description:
+    - The URL of an HTTP proxy to use for the connection. Can also be specified via K8S_AUTH_PROXY environment variable.
+    - Please note that this module does not pick up typical proxy settings from the environment (e.g. HTTP_PROXY).
+    type: str
+  persist_config:
+    description:
+    - Whether or not to save the kube config refresh tokens.
+      Can also be specified via K8S_AUTH_PERSIST_CONFIG environment variable.
+    - When the k8s context is using a user credentials with refresh tokens (like oidc or gke/gcloud auth),
+      the token is refreshed by the k8s python client library but not saved by default. So the old refresh token can
+      expire and the next auth might fail. Setting this flag to true will tell the k8s python client to save the
+      new refresh token to the kube config file.
+    - Default to false.
+    - Please note that the current version of the k8s python client library does not support setting this flag to True yet.
+    - "The fix for this k8s python library is here: https://github.com/kubernetes-client/python-base/pull/169"
+    type: bool
+notes:
+  - "The OpenShift Python client wraps the K8s Python client, providing full access to
+    all of the APIS and models available on both platforms. For API version details and
+    additional information visit https://github.com/openshift/openshift-restclient-python"
+  - "To avoid SSL certificate validation errors when C(validate_certs) is I(True), the full
+    certificate chain for the API server must be provided via C(ca_cert) or in the
+    kubeconfig file."
+'''
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_name_options.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_name_options.py
new file mode 100644
index 00000000..fe4e5c47
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_name_options.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Doc fragment: options for selecting or identifying a specific K8s object (api_version/kind/name/namespace).
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+    DOCUMENTATION = r'''
+options:
+  api_version:
+    description:
+    - Use to specify the API version.
+    - Use to create, delete, or discover an object without providing a full resource definition.
+    - Use in conjunction with I(kind), I(name), and I(namespace) to identify a specific object.
+    - If I(resource definition) is provided, the I(apiVersion) value from the I(resource_definition)
+      will override this option.
+    type: str
+    default: v1
+    aliases:
+    - api
+    - version
+  kind:
+    description:
+    - Use to specify an object model.
+    - Use to create, delete, or discover an object without providing a full resource definition.
+    - Use in conjunction with I(api_version), I(name), and I(namespace) to identify a specific object.
+    - If I(resource definition) is provided, the I(kind) value from the I(resource_definition)
+      will override this option.
+    type: str
+  name:
+    description:
+    - Use to specify an object name.
+    - Use to create, delete, or discover an object without providing a full resource definition.
+    - Use in conjunction with I(api_version), I(kind) and I(namespace) to identify a specific object.
+    - If I(resource definition) is provided, the I(metadata.name) value from the I(resource_definition)
+      will override this option.
+    type: str
+  namespace:
+    description:
+    - Use to specify an object namespace.
+    - Useful when creating, deleting, or discovering an object without providing a full resource definition.
+    - Use in conjunction with I(api_version), I(kind), and I(name) to identify a specific object.
+    - If I(resource definition) is provided, the I(metadata.namespace) value from the I(resource_definition)
+      will override this option.
+    type: str
+'''
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_resource_options.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_resource_options.py
new file mode 100644
index 00000000..b9dcfe16
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_resource_options.py
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Doc fragment: options for providing an object configuration, inline (resource_definition) or from a file (src).
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+    DOCUMENTATION = r'''
+options:
+  resource_definition:
+    description:
+    - Provide a valid YAML definition (either as a string, list, or dict) for an object when creating or updating.
+    - "NOTE: I(kind), I(api_version), I(name), and I(namespace) will be overwritten by corresponding values found in the provided I(resource_definition)."
+    aliases:
+    - definition
+    - inline
+  src:
+    description:
+    - "Provide a path to a file containing a valid YAML definition of an object or objects to be created or updated. Mutually
+      exclusive with I(resource_definition). NOTE: I(kind), I(api_version), I(name), and I(namespace) will be
+      overwritten by corresponding values found in the configuration read in from the I(src) file."
+    - Reads from the local file system. To read from the Ansible controller's file system, including vaulted files, use the file lookup
+      plugin or template lookup plugin, combined with the from_yaml filter, and pass the result to
+      I(resource_definition). See Examples below.
+    - Mutually exclusive with I(template) in case of M(k8s) module.
+    type: path
+'''
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_scale_options.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_scale_options.py
new file mode 100644
index 00000000..0c01439a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_scale_options.py
@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Options used by scale modules.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
class ModuleDocFragment(object):
    """Shared doc fragment: options used by the scale modules.

    Contributes ``replicas``, ``current_replicas``, ``resource_version``,
    ``wait`` and ``wait_timeout`` to a module's documentation.
    """

    # Parsed as YAML by Ansible's documentation tooling - keep indentation intact.
    DOCUMENTATION = r'''
options:
  replicas:
    description:
    - The desired number of replicas.
    type: int
    required: True
  current_replicas:
    description:
    - For Deployment, ReplicaSet, Replication Controller, only scale, if the number of existing replicas
      matches. In the case of a Job, update parallelism only if the current parallelism value matches.
    type: int
  resource_version:
    description:
    - Only attempt to scale, if the current object version matches.
    type: str
  wait:
    description:
    - For Deployment, ReplicaSet, Replication Controller, wait for the status value of I(ready_replicas) to change
      to the number of I(replicas). In the case of a Job, this option is ignored.
    type: bool
    default: yes
  wait_timeout:
    description:
    - When C(wait) is I(True), the number of seconds to wait for the I(ready_replicas) status to equal I(replicas).
      If the status is not reached within the allotted time, an error will result. In the case of a Job, this option
      is ignored.
    type: int
    default: 20
'''
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_state_options.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_state_options.py
new file mode 100644
index 00000000..8f741ba8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_state_options.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Options for specifying object state
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
class ModuleDocFragment(object):
    """Shared doc fragment: options for specifying the desired object state.

    Contributes the ``state`` and ``force`` options to a module's documentation.
    """

    # Parsed as YAML by Ansible's documentation tooling - keep indentation intact.
    DOCUMENTATION = r'''
options:
  state:
    description:
    - Determines if an object should be created, patched, or deleted. When set to C(present), an object will be
      created, if it does not already exist. If set to C(absent), an existing object will be deleted. If set to
      C(present), an existing object will be patched, if its attributes differ from those specified using
      I(resource_definition) or I(src).
    type: str
    default: present
    choices: [ absent, present ]
  force:
    description:
    - If set to C(yes), and I(state) is C(present), an existing object will be replaced.
    type: bool
    default: no
'''
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_wait_options.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_wait_options.py
new file mode 100644
index 00000000..867901bb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_wait_options.py
@@ -0,0 +1,67 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Options for specifying object wait
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
class ModuleDocFragment(object):
    """Shared doc fragment: options controlling waiting on resource state.

    Contributes ``wait``, ``wait_sleep``, ``wait_timeout`` and
    ``wait_condition`` to a module's documentation.
    """

    # Parsed as YAML by Ansible's documentation tooling - keep indentation intact.
    DOCUMENTATION = r'''
options:
  wait:
    description:
    - Whether to wait for certain resource kinds to end up in the desired state.
    - By default the module exits once Kubernetes has received the request.
    - Implemented for C(state=present) for C(Deployment), C(DaemonSet) and C(Pod), and for C(state=absent) for all resource kinds.
    - For resource kinds without an implementation, C(wait) returns immediately unless C(wait_condition) is set.
    default: no
    type: bool
  wait_sleep:
    description:
    - Number of seconds to sleep between checks.
    default: 5
    type: int
  wait_timeout:
    description:
    - How long in seconds to wait for the resource to end up in the desired state.
    - Ignored if C(wait) is not set.
    default: 120
    type: int
  wait_condition:
    description:
    - Specifies a custom condition on the status to wait for.
    - Ignored if C(wait) is not set or is set to False.
    suboptions:
      type:
        type: str
        description:
        - The type of condition to wait for.
        - For example, the C(Pod) resource will set the C(Ready) condition (among others).
        - Required if you are specifying a C(wait_condition).
        - If left empty, the C(wait_condition) field will be ignored.
        - The possible types for a condition are specific to each resource type in Kubernetes.
        - See the API documentation of the status field for a given resource to see possible choices.
      status:
        type: str
        description:
        - The value of the status field in your desired condition.
        - For example, if a C(Deployment) is paused, the C(Progressing) C(type) will have the C(Unknown) status.
        choices:
        - True
        - False
        - Unknown
        default: "True"
      reason:
        type: str
        description:
        - The value of the reason field in your desired condition
        - For example, if a C(Deployment) is paused, The C(Progressing) C(type) will have the C(DeploymentPaused) reason.
        - The possible reasons in a condition are specific to each resource type in Kubernetes.
        - See the API documentation of the status field for a given resource to see possible choices.
    type: dict
'''
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/filter/k8s.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/filter/k8s.py
new file mode 100644
index 00000000..3597b852
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/filter/k8s.py
@@ -0,0 +1,33 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+try:
+ from openshift.helper.hashes import generate_hash
+ HAS_GENERATE_HASH = True
+except ImportError:
+ HAS_GENERATE_HASH = False
+
+from ansible.errors import AnsibleFilterError
+
+
def k8s_config_resource_name(resource):
    """Return the resource's metadata.name suffixed with a hash of its content.

    Requires the ``openshift`` package (>= 0.7.2) for ``generate_hash``.
    Raises AnsibleFilterError when the dependency is missing or the resource
    has no ``metadata.name``.
    """
    if not HAS_GENERATE_HASH:
        raise AnsibleFilterError("k8s_config_resource_name requires openshift>=0.7.2")
    try:
        return '-'.join((resource['metadata']['name'], generate_hash(resource)))
    except KeyError:
        raise AnsibleFilterError("resource must have a metadata.name key to generate a resource name")
+
+
+# ---- Ansible filters ----
# ---- Ansible filters ----
class FilterModule(object):
    """Expose the Kubernetes-related Jinja2 filters to Ansible."""

    def filters(self):
        """Return the mapping of filter names to their callables."""
        filter_map = {
            'k8s_config_resource_name': k8s_config_resource_name,
        }
        return filter_map
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/inventory/k8s.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/inventory/k8s.py
new file mode 100644
index 00000000..ede54375
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/inventory/k8s.py
@@ -0,0 +1,363 @@
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: k8s
+ plugin_type: inventory
+ author:
+ - Chris Houseknecht <@chouseknecht>
+ - Fabian von Feilitzsch <@fabianvf>
+
+ short_description: Kubernetes (K8s) inventory source
+
+ description:
+ - Fetch containers and services for one or more clusters.
+ - Groups by cluster name, namespace, namespace_services, namespace_pods, and labels.
+ - Uses the kubectl connection plugin to access the Kubernetes cluster.
+ - Uses k8s.(yml|yaml) YAML configuration file to set parameter values.
+
+ options:
+ plugin:
+ description: token that ensures this is a source file for the 'k8s' plugin.
+ required: True
+ choices: ['k8s']
+ connections:
+ description:
+ - Optional list of cluster connection settings. If no connections are provided, the default
+ I(~/.kube/config) and active context will be used, and objects will be returned for all namespaces
+ the active user is authorized to access.
+ suboptions:
+ name:
+ description:
+ - Optional name to assign to the cluster. If not provided, a name is constructed from the server
+ and port.
+ kubeconfig:
+ description:
+ - Path to an existing Kubernetes config file. If not provided, and no other connection
+ options are provided, the OpenShift client will attempt to load the default
+ configuration file from I(~/.kube/config.json). Can also be specified via K8S_AUTH_KUBECONFIG
+ environment variable.
+ context:
+ description:
+ - The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment
+ variable.
+ host:
+ description:
+ - Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable.
+ api_key:
+ description:
+ - Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment
+ variable.
+ username:
+ description:
+ - Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME
+ environment variable.
+ password:
+ description:
+ - Provide a password for authenticating with the API. Can also be specified via K8S_AUTH_PASSWORD
+ environment variable.
+ client_cert:
+ description:
+ - Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE
+ environment variable.
+ aliases: [ cert_file ]
+ client_key:
+ description:
+ - Path to a key file used to authenticate with the API. Can also be specified via K8S_AUTH_KEY_FILE
+ environment variable.
+ aliases: [ key_file ]
+ ca_cert:
+ description:
+ - Path to a CA certificate used to authenticate with the API. Can also be specified via
+ K8S_AUTH_SSL_CA_CERT environment variable.
+ aliases: [ ssl_ca_cert ]
+ validate_certs:
+ description:
+ - "Whether or not to verify the API server's SSL certificates. Can also be specified via
+ K8S_AUTH_VERIFY_SSL environment variable."
+ type: bool
+ aliases: [ verify_ssl ]
+ namespaces:
+ description:
+ - List of namespaces. If not specified, will fetch all containers for all namespaces user is authorized
+ to access.
+
+ requirements:
+ - "python >= 2.7"
+ - "openshift >= 0.6"
+ - "PyYAML >= 3.11"
+'''
+
+EXAMPLES = '''
+# File must be named k8s.yaml or k8s.yml
+
+# Authenticate with token, and return all pods and services for all namespaces
+plugin: community.kubernetes.k8s
+connections:
+ - host: https://192.168.64.4:8443
+ api_key: xxxxxxxxxxxxxxxx
+ validate_certs: false
+
+# Use default config (~/.kube/config) file and active context, and return objects for a specific namespace
+plugin: community.kubernetes.k8s
+connections:
+ - namespaces:
+ - testing
+
+# Use a custom config file, and a specific context.
+plugin: community.kubernetes.k8s
+connections:
+ - kubeconfig: /path/to/config
+ context: 'awx/192-168-64-4:8443/developer'
+'''
+
+import json
+
+from ansible.errors import AnsibleError
+from ansible_collections.community.kubernetes.plugins.module_utils.common import K8sAnsibleMixin, HAS_K8S_MODULE_HELPER, k8s_import_exception
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
+
+try:
+ from openshift.dynamic.exceptions import DynamicApiError
+except ImportError:
+ pass
+
+
def format_dynamic_api_exc(exc):
    """Build a readable error message from a DynamicApiError.

    Prefers the JSON ``message`` field of the response body when the server
    replied with JSON, falls back to the raw body, and finally to the HTTP
    status and reason when there is no body at all.
    """
    if not exc.body:
        return '%s Reason: %s' % (exc.status, exc.reason)
    headers = exc.headers
    if headers and headers.get('Content-Type') == 'application/json':
        message = json.loads(exc.body).get('message')
        if message:
            return message
    return exc.body
+
+
class K8sInventoryException(Exception):
    """Raised when the k8s inventory plugin cannot build the inventory
    (missing openshift client, bad configuration, or API errors)."""
    pass
+
+
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable, K8sAnsibleMixin):
    """Kubernetes inventory plugin.

    Adds one inventory host per running container (and one per Service),
    grouped by cluster name, namespace, namespace_pods, namespace_services
    and label values.
    """

    NAME = 'community.kubernetes.k8s'

    # Containers added here are reached through the kubectl connection plugin.
    connection_plugin = 'community.kubernetes.kubectl'
    transport = 'kubectl'

    def parse(self, inventory, loader, path, cache=True):
        """Ansible entry point: read the k8s.(yml|yaml) source file at *path*
        and populate the inventory."""
        super(InventoryModule, self).parse(inventory, loader, path)
        cache_key = self._get_cache_prefix(path)
        config_data = self._read_config_data(path)
        self.setup(config_data, cache, cache_key)

    def setup(self, config_data, cache, cache_key):
        """Validate requirements, consult the cache, then fetch objects.

        :param config_data: parsed plugin configuration
        :param cache: whether cached data may be used
        :param cache_key: key under which this source's data is cached
        :raises K8sInventoryException: if the OpenShift client is unavailable
        """
        connections = config_data.get('connections')

        if not HAS_K8S_MODULE_HELPER:
            raise K8sInventoryException(
                "This module requires the OpenShift Python client. Try `pip install openshift`. Detail: {0}".format(k8s_import_exception)
            )

        source_data = None
        if cache and cache_key in self._cache:
            try:
                source_data = self._cache[cache_key]
            except KeyError:
                pass

        if not source_data:
            self.fetch_objects(connections)

    def fetch_objects(self, connections):
        """Fetch pods and services for every configured connection, or for the
        default kubeconfig context when no connections are given."""
        if connections:
            if not isinstance(connections, list):
                raise K8sInventoryException("Expecting connections to be a list.")

            for connection in connections:
                if not isinstance(connection, dict):
                    raise K8sInventoryException("Expecting connection to be a dictionary.")
                client = self.get_api_client(**connection)
                name = connection.get('name', self.get_default_host_name(client.configuration.host))
                if connection.get('namespaces'):
                    namespaces = connection['namespaces']
                else:
                    namespaces = self.get_available_namespaces(client)
                for namespace in namespaces:
                    self.get_pods_for_namespace(client, name, namespace)
                    self.get_services_for_namespace(client, name, namespace)
        else:
            client = self.get_api_client()
            name = self.get_default_host_name(client.configuration.host)
            namespaces = self.get_available_namespaces(client)
            for namespace in namespaces:
                self.get_pods_for_namespace(client, name, namespace)
                self.get_services_for_namespace(client, name, namespace)

    @staticmethod
    def get_default_host_name(host):
        """Turn an API server URL into a string usable as an inventory group name."""
        return host.replace('https://', '').replace('http://', '').replace('.', '-').replace(':', '_')

    def get_available_namespaces(self, client):
        """Return the names of all namespaces the authenticated user can list."""
        v1_namespace = client.resources.get(api_version='v1', kind='Namespace')
        try:
            obj = v1_namespace.get()
        except DynamicApiError as exc:
            self.display.debug(exc)
            raise K8sInventoryException('Error fetching Namespace list: %s' % format_dynamic_api_exc(exc))
        return [namespace.metadata.name for namespace in obj.items]

    def get_pods_for_namespace(self, client, name, namespace):
        """Add one inventory host per container of every pod in *namespace*,
        with hostvars and connection variables for the kubectl transport."""
        v1_pod = client.resources.get(api_version='v1', kind='Pod')
        try:
            obj = v1_pod.get(namespace=namespace)
        except DynamicApiError as exc:
            self.display.debug(exc)
            raise K8sInventoryException('Error fetching Pod list: %s' % format_dynamic_api_exc(exc))

        namespace_group = 'namespace_{0}'.format(namespace)
        namespace_pods_group = '{0}_pods'.format(namespace_group)

        # add_group/add_child are idempotent, so repeating them per namespace is safe.
        self.inventory.add_group(name)
        self.inventory.add_group(namespace_group)
        self.inventory.add_child(name, namespace_group)
        self.inventory.add_group(namespace_pods_group)
        self.inventory.add_child(namespace_group, namespace_pods_group)

        for pod in obj.items:
            pod_name = pod.metadata.name
            pod_groups = []
            pod_annotations = {} if not pod.metadata.annotations else dict(pod.metadata.annotations)

            if pod.metadata.labels:
                # create a group for each label_value
                # NOTE(review): assumes the dynamic client's ResourceField
                # iterates as (key, value) pairs - confirm against the
                # installed openshift client version.
                for key, value in pod.metadata.labels:
                    group_name = 'label_{0}_{1}'.format(key, value)
                    if group_name not in pod_groups:
                        pod_groups.append(group_name)
                    self.inventory.add_group(group_name)
                pod_labels = dict(pod.metadata.labels)
            else:
                pod_labels = {}

            if not pod.status.containerStatuses:
                # No container has been started yet (e.g. Pending pod) - nothing to add.
                continue

            for container in pod.status.containerStatuses:
                # add each pod_container to the namespace group, and to each label_value group
                container_name = '{0}_{1}'.format(pod.metadata.name, container.name)
                self.inventory.add_host(container_name)
                self.inventory.add_child(namespace_pods_group, container_name)
                if pod_groups:
                    for group in pod_groups:
                        self.inventory.add_child(group, container_name)

                # Add hostvars
                self.inventory.set_variable(container_name, 'object_type', 'pod')
                self.inventory.set_variable(container_name, 'labels', pod_labels)
                self.inventory.set_variable(container_name, 'annotations', pod_annotations)
                self.inventory.set_variable(container_name, 'cluster_name', pod.metadata.clusterName)
                self.inventory.set_variable(container_name, 'pod_node_name', pod.spec.nodeName)
                # BUGFIX: was pod.spec.name, but a Pod spec has no 'name' field
                # (the name lives in metadata), so the hostvar was always None.
                self.inventory.set_variable(container_name, 'pod_name', pod_name)
                self.inventory.set_variable(container_name, 'pod_host_ip', pod.status.hostIP)
                self.inventory.set_variable(container_name, 'pod_phase', pod.status.phase)
                self.inventory.set_variable(container_name, 'pod_ip', pod.status.podIP)
                self.inventory.set_variable(container_name, 'pod_self_link', pod.metadata.selfLink)
                self.inventory.set_variable(container_name, 'pod_resource_version', pod.metadata.resourceVersion)
                self.inventory.set_variable(container_name, 'pod_uid', pod.metadata.uid)
                # BUGFIX: 'container_name' was set to container.image (copy-paste
                # from the line below); use the container's actual name.
                self.inventory.set_variable(container_name, 'container_name', container.name)
                self.inventory.set_variable(container_name, 'container_image', container.image)
                if container.state.running:
                    self.inventory.set_variable(container_name, 'container_state', 'Running')
                if container.state.terminated:
                    self.inventory.set_variable(container_name, 'container_state', 'Terminated')
                if container.state.waiting:
                    self.inventory.set_variable(container_name, 'container_state', 'Waiting')
                self.inventory.set_variable(container_name, 'container_ready', container.ready)
                self.inventory.set_variable(container_name, 'ansible_remote_tmp', '/tmp/')
                self.inventory.set_variable(container_name, 'ansible_connection', self.connection_plugin)
                self.inventory.set_variable(container_name, 'ansible_{0}_pod'.format(self.transport),
                                            pod_name)
                self.inventory.set_variable(container_name, 'ansible_{0}_container'.format(self.transport),
                                            container.name)
                self.inventory.set_variable(container_name, 'ansible_{0}_namespace'.format(self.transport),
                                            namespace)

    def get_services_for_namespace(self, client, name, namespace):
        """Add one inventory host per Service in *namespace* with its ports,
        selector and load-balancer details as hostvars."""
        v1_service = client.resources.get(api_version='v1', kind='Service')
        try:
            obj = v1_service.get(namespace=namespace)
        except DynamicApiError as exc:
            self.display.debug(exc)
            raise K8sInventoryException('Error fetching Service list: %s' % format_dynamic_api_exc(exc))

        namespace_group = 'namespace_{0}'.format(namespace)
        namespace_services_group = '{0}_services'.format(namespace_group)

        self.inventory.add_group(name)
        self.inventory.add_group(namespace_group)
        self.inventory.add_child(name, namespace_group)
        self.inventory.add_group(namespace_services_group)
        self.inventory.add_child(namespace_group, namespace_services_group)

        for service in obj.items:
            service_name = service.metadata.name
            service_labels = {} if not service.metadata.labels else dict(service.metadata.labels)
            service_annotations = {} if not service.metadata.annotations else dict(service.metadata.annotations)

            self.inventory.add_host(service_name)

            if service.metadata.labels:
                # create a group for each label_value
                for key, value in service.metadata.labels:
                    group_name = 'label_{0}_{1}'.format(key, value)
                    self.inventory.add_group(group_name)
                    self.inventory.add_child(group_name, service_name)

            # Previously wrapped in a try/except AnsibleError that only
            # re-raised - a no-op, removed.
            self.inventory.add_child(namespace_services_group, service_name)

            ports = [{'name': port.name,
                      'port': port.port,
                      'protocol': port.protocol,
                      'targetPort': port.targetPort,
                      'nodePort': port.nodePort} for port in service.spec.ports or []]

            # add hostvars
            self.inventory.set_variable(service_name, 'object_type', 'service')
            self.inventory.set_variable(service_name, 'labels', service_labels)
            self.inventory.set_variable(service_name, 'annotations', service_annotations)
            self.inventory.set_variable(service_name, 'cluster_name', service.metadata.clusterName)
            self.inventory.set_variable(service_name, 'ports', ports)
            self.inventory.set_variable(service_name, 'type', service.spec.type)
            self.inventory.set_variable(service_name, 'self_link', service.metadata.selfLink)
            self.inventory.set_variable(service_name, 'resource_version', service.metadata.resourceVersion)
            self.inventory.set_variable(service_name, 'uid', service.metadata.uid)

            # Optional spec fields: only exported when present on the Service.
            if service.spec.externalTrafficPolicy:
                self.inventory.set_variable(service_name, 'external_traffic_policy',
                                            service.spec.externalTrafficPolicy)
            if service.spec.externalIPs:
                self.inventory.set_variable(service_name, 'external_ips', service.spec.externalIPs)

            if service.spec.externalName:
                self.inventory.set_variable(service_name, 'external_name', service.spec.externalName)

            if service.spec.healthCheckNodePort:
                self.inventory.set_variable(service_name, 'health_check_node_port',
                                            service.spec.healthCheckNodePort)
            if service.spec.loadBalancerIP:
                self.inventory.set_variable(service_name, 'load_balancer_ip',
                                            service.spec.loadBalancerIP)
            if service.spec.selector:
                self.inventory.set_variable(service_name, 'selector', dict(service.spec.selector))

            if hasattr(service.status.loadBalancer, 'ingress') and service.status.loadBalancer.ingress:
                load_balancer = [{'hostname': ingress.hostname,
                                  'ip': ingress.ip} for ingress in service.status.loadBalancer.ingress]
                self.inventory.set_variable(service_name, 'load_balancer', load_balancer)
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/inventory/openshift.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/inventory/openshift.py
new file mode 100644
index 00000000..f6c393bd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/inventory/openshift.py
@@ -0,0 +1,202 @@
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: openshift
+ plugin_type: inventory
+ author:
+ - Chris Houseknecht <@chouseknecht>
+
+ short_description: OpenShift inventory source
+
+ description:
+ - Fetch containers, services and routes for one or more clusters
+ - Groups by cluster name, namespace, namespace_services, namespace_pods, namespace_routes, and labels
+ - Uses openshift.(yml|yaml) YAML configuration file to set parameter values.
+
+ options:
+ plugin:
+ description: token that ensures this is a source file for the 'openshift' plugin.
+ required: True
+ choices: ['openshift']
+ connections:
+ description:
+ - Optional list of cluster connection settings. If no connections are provided, the default
+ I(~/.kube/config) and active context will be used, and objects will be returned for all namespaces
+ the active user is authorized to access.
+ suboptions:
+ name:
+ description:
+ - Optional name to assign to the cluster. If not provided, a name is constructed from the server
+ and port.
+ kubeconfig:
+ description:
+ - Path to an existing Kubernetes config file. If not provided, and no other connection
+ options are provided, the OpenShift client will attempt to load the default
+ configuration file from I(~/.kube/config.json). Can also be specified via K8S_AUTH_KUBECONFIG
+ environment variable.
+ context:
+ description:
+ - The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment
+ variable.
+ host:
+ description:
+ - Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable.
+ api_key:
+ description:
+ - Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment
+ variable.
+ username:
+ description:
+ - Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME
+ environment variable.
+ password:
+ description:
+ - Provide a password for authenticating with the API. Can also be specified via K8S_AUTH_PASSWORD
+ environment variable.
+ client_cert:
+ description:
+ - Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE
+ environment variable.
+ aliases: [ cert_file ]
+ client_key:
+ description:
+ - Path to a key file used to authenticate with the API. Can also be specified via K8S_AUTH_KEY_FILE
+ environment variable.
+ aliases: [ key_file ]
+ ca_cert:
+ description:
+ - Path to a CA certificate used to authenticate with the API. Can also be specified via
+ K8S_AUTH_SSL_CA_CERT environment variable.
+ aliases: [ ssl_ca_cert ]
+ validate_certs:
+ description:
+ - "Whether or not to verify the API server's SSL certificates. Can also be specified via
+ K8S_AUTH_VERIFY_SSL environment variable."
+ type: bool
+ aliases: [ verify_ssl ]
+ namespaces:
+ description:
+ - List of namespaces. If not specified, will fetch all containers for all namespaces user is authorized
+ to access.
+
+ requirements:
+ - "python >= 2.7"
+ - "openshift >= 0.6"
+ - "PyYAML >= 3.11"
+'''
+
+EXAMPLES = '''
+# File must be named openshift.yaml or openshift.yml
+
+# Authenticate with token, and return all pods and services for all namespaces
+plugin: community.kubernetes.openshift
+connections:
+ - host: https://192.168.64.4:8443
+ api_key: xxxxxxxxxxxxxxxx
+ verify_ssl: false
+
+# Use default config (~/.kube/config) file and active context, and return objects for a specific namespace
+plugin: community.kubernetes.openshift
+connections:
+ - namespaces:
+ - testing
+
+# Use a custom config file, and a specific context.
+plugin: community.kubernetes.openshift
+connections:
+ - kubeconfig: /path/to/config
+ context: 'awx/192-168-64-4:8443/developer'
+'''
+
+from ansible_collections.community.kubernetes.plugins.inventory.k8s import K8sInventoryException, InventoryModule as K8sInventoryModule, format_dynamic_api_exc
+
+try:
+ from openshift.dynamic.exceptions import DynamicApiError
+except ImportError:
+ pass
+
+
class InventoryModule(K8sInventoryModule):
    """OpenShift inventory plugin: extends the k8s inventory plugin with
    OpenShift Route objects."""

    NAME = 'community.kubernetes.openshift'

    # Containers are reached through the 'oc' transport instead of kubectl.
    transport = 'oc'

    def fetch_objects(self, connections):
        # Let the k8s base class gather pods and services first, then add
        # the OpenShift-specific Route objects for the same connections.
        super(InventoryModule, self).fetch_objects(connections)

        if connections:
            if not isinstance(connections, list):
                raise K8sInventoryException("Expecting connections to be a list.")

            for connection in connections:
                client = self.get_api_client(**connection)
                name = connection.get('name', self.get_default_host_name(client.configuration.host))
                if connection.get('namespaces'):
                    namespaces = connection['namespaces']
                else:
                    namespaces = self.get_available_namespaces(client)
                for namespace in namespaces:
                    self.get_routes_for_namespace(client, name, namespace)
        else:
            # No connections configured: use the default kubeconfig context.
            client = self.get_api_client()
            name = self.get_default_host_name(client.configuration.host)
            namespaces = self.get_available_namespaces(client)
            for namespace in namespaces:
                self.get_routes_for_namespace(client, name, namespace)

    def get_routes_for_namespace(self, client, name, namespace):
        # Adds one inventory host per Route in *namespace*; hostvars carry the
        # route's metadata plus spec.host/path and, when set, the target port.
        v1_route = client.resources.get(api_version='v1', kind='Route')
        try:
            obj = v1_route.get(namespace=namespace)
        except DynamicApiError as exc:
            self.display.debug(exc)
            raise K8sInventoryException('Error fetching Routes list: %s' % format_dynamic_api_exc(exc))

        namespace_group = 'namespace_{0}'.format(namespace)
        namespace_routes_group = '{0}_routes'.format(namespace_group)

        # add_group/add_child are idempotent, so repeating them per namespace is safe.
        self.inventory.add_group(name)
        self.inventory.add_group(namespace_group)
        self.inventory.add_child(name, namespace_group)
        self.inventory.add_group(namespace_routes_group)
        self.inventory.add_child(namespace_group, namespace_routes_group)
        for route in obj.items:
            route_name = route.metadata.name
            route_annotations = {} if not route.metadata.annotations else dict(route.metadata.annotations)

            self.inventory.add_host(route_name)

            if route.metadata.labels:
                # create a group for each label_value
                # NOTE(review): assumes the dynamic client's ResourceField
                # iterates as (key, value) pairs - confirm against the
                # installed openshift client version.
                for key, value in route.metadata.labels:
                    group_name = 'label_{0}_{1}'.format(key, value)
                    self.inventory.add_group(group_name)
                    self.inventory.add_child(group_name, route_name)
                route_labels = dict(route.metadata.labels)
            else:
                route_labels = {}

            self.inventory.add_child(namespace_routes_group, route_name)

            # add hostvars
            self.inventory.set_variable(route_name, 'labels', route_labels)
            self.inventory.set_variable(route_name, 'annotations', route_annotations)
            self.inventory.set_variable(route_name, 'cluster_name', route.metadata.clusterName)
            self.inventory.set_variable(route_name, 'object_type', 'route')
            self.inventory.set_variable(route_name, 'self_link', route.metadata.selfLink)
            self.inventory.set_variable(route_name, 'resource_version', route.metadata.resourceVersion)
            self.inventory.set_variable(route_name, 'uid', route.metadata.uid)

            if route.spec.host:
                self.inventory.set_variable(route_name, 'host', route.spec.host)

            if route.spec.path:
                self.inventory.set_variable(route_name, 'path', route.spec.path)

            if hasattr(route.spec.port, 'targetPort') and route.spec.port.targetPort:
                self.inventory.set_variable(route_name, 'port', dict(route.spec.port))
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/lookup/k8s.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/lookup/k8s.py
new file mode 100644
index 00000000..68849053
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/lookup/k8s.py
@@ -0,0 +1,287 @@
+#
+# Copyright 2018 Red Hat | Ansible
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ lookup: k8s
+
+ short_description: Query the K8s API
+
+ description:
+ - Uses the OpenShift Python client to fetch a specific object by name, all matching objects within a
+ namespace, or all matching objects for all namespaces, as well as information about the cluster.
+      - Provides access to the full range of K8s APIs.
+ - Enables authentication via config file, certificates, password or token.
+
+ options:
+ cluster_info:
+ description:
+ - Use to specify the type of cluster information you are attempting to retrieve. Will take priority
+ over all the other options.
+ api_version:
+ description:
+ - Use to specify the API version. If I(resource definition) is provided, the I(apiVersion) from the
+ I(resource_definition) will override this option.
+ default: v1
+ kind:
+ description:
+ - Use to specify an object model. If I(resource definition) is provided, the I(kind) from a
+ I(resource_definition) will override this option.
+ required: true
+ resource_name:
+ description:
+ - Fetch a specific object by name. If I(resource definition) is provided, the I(metadata.name) value
+ from the I(resource_definition) will override this option.
+ namespace:
+ description:
+ - Limit the objects returned to a specific namespace. If I(resource definition) is provided, the
+ I(metadata.namespace) value from the I(resource_definition) will override this option.
+ label_selector:
+ description:
+ - Additional labels to include in the query. Ignored when I(resource_name) is provided.
+ field_selector:
+ description:
+ - Specific fields on which to query. Ignored when I(resource_name) is provided.
+ resource_definition:
+ description:
+ - "Provide a YAML configuration for an object. NOTE: I(kind), I(api_version), I(resource_name),
+ and I(namespace) will be overwritten by corresponding values found in the provided I(resource_definition)."
+ src:
+ description:
+          - "Provide a path to a file containing a valid YAML definition of an object. Mutually
+ exclusive with I(resource_definition). NOTE: I(kind), I(api_version), I(resource_name), and I(namespace)
+ will be overwritten by corresponding values found in the configuration read in from the I(src) file."
+ - Reads from the local file system. To read from the Ansible controller's file system, use the file lookup
+ plugin or template lookup plugin, combined with the from_yaml filter, and pass the result to
+ I(resource_definition). See Examples below.
+ host:
+ description:
+ - Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable.
+ api_key:
+ description:
+ - Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment variable.
+ kubeconfig:
+ description:
+ - Path to an existing Kubernetes config file. If not provided, and no other connection
+ options are provided, the openshift client will attempt to load the default
+          configuration file from I(~/.kube/config). Can also be specified via K8S_AUTH_KUBECONFIG environment
+ variable.
+ context:
+ description:
+ - The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment
+ variable.
+ username:
+ description:
+ - Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME environment
+ variable.
+ password:
+ description:
+ - Provide a password for authenticating with the API. Can also be specified via K8S_AUTH_PASSWORD environment
+ variable.
+ client_cert:
+ description:
+ - Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE
+ environment
+ variable.
+ aliases: [ cert_file ]
+ client_key:
+ description:
+ - Path to a key file used to authenticate with the API. Can also be specified via K8S_AUTH_KEY_FILE environment
+ variable.
+ aliases: [ key_file ]
+ ca_cert:
+ description:
+ - Path to a CA certificate used to authenticate with the API. Can also be specified via K8S_AUTH_SSL_CA_CERT
+ environment variable.
+ aliases: [ ssl_ca_cert ]
+ validate_certs:
+ description:
+ - Whether or not to verify the API server's SSL certificates. Can also be specified via K8S_AUTH_VERIFY_SSL
+ environment variable.
+ type: bool
+ aliases: [ verify_ssl ]
+
+ requirements:
+ - "python >= 2.7"
+ - "openshift >= 0.6"
+ - "PyYAML >= 3.11"
+
+ notes:
+ - "The OpenShift Python client wraps the K8s Python client, providing full access to
+        all of the APIs and models available on both platforms. For API version details and
+ additional information visit https://github.com/openshift/openshift-restclient-python"
+'''
+
+EXAMPLES = """
+- name: Fetch a list of namespaces
+ set_fact:
+ projects: "{{ lookup('community.kubernetes.k8s', api_version='v1', kind='Namespace') }}"
+
+- name: Fetch all deployments
+ set_fact:
+ deployments: "{{ lookup('community.kubernetes.k8s', kind='Deployment') }}"
+
+- name: Fetch all deployments in a namespace
+ set_fact:
+ deployments: "{{ lookup('community.kubernetes.k8s', kind='Deployment', namespace='testing') }}"
+
+- name: Fetch a specific deployment by name
+ set_fact:
+ deployments: "{{ lookup('community.kubernetes.k8s', kind='Deployment', namespace='testing', resource_name='elastic') }}"
+
+- name: Fetch with label selector
+ set_fact:
+ service: "{{ lookup('community.kubernetes.k8s', kind='Service', label_selector='app=galaxy') }}"
+
+# Use parameters from a YAML config
+
+- name: Load config from the Ansible controller filesystem
+ set_fact:
+ config: "{{ lookup('file', 'service.yml') | from_yaml }}"
+
+- name: Using the config (loaded from a file in prior task), fetch the latest version of the object
+ set_fact:
+ service: "{{ lookup('community.kubernetes.k8s', resource_definition=config) }}"
+
+- name: Use a config from the local filesystem
+ set_fact:
+ service: "{{ lookup('community.kubernetes.k8s', src='service.yml') }}"
+"""
+
+RETURN = """
+ _list:
+ description:
+      - One or more object definitions returned from the API.
+ type: complex
+ contains:
+ api_version:
+ description: The versioned schema of this representation of an object.
+ returned: success
+ type: str
+ kind:
+ description: Represents the REST resource this object represents.
+ returned: success
+ type: str
+ metadata:
+ description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
+ returned: success
+ type: complex
+ spec:
+ description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
+ returned: success
+ type: complex
+ status:
+ description: Current status details for the object.
+ returned: success
+ type: complex
+"""
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.common._collections_compat import KeysView
+from ansible.plugins.lookup import LookupBase
+
+from ansible_collections.community.kubernetes.plugins.module_utils.common import K8sAnsibleMixin
+
+
+try:
+ from openshift.dynamic.exceptions import NotFoundError
+ HAS_K8S_MODULE_HELPER = True
+ k8s_import_exception = None
+except ImportError as e:
+ HAS_K8S_MODULE_HELPER = False
+ k8s_import_exception = e
+
+
class KubernetesLookup(K8sAnsibleMixin):
    """Back end for the k8s lookup: queries the cluster through the dynamic client."""

    def __init__(self):
        # The lookup is useless without the OpenShift client, so refuse to
        # construct one rather than fail later with an obscure error.
        if not HAS_K8S_MODULE_HELPER:
            raise Exception(
                "Requires the OpenShift Python client. Try `pip install openshift`. Detail: {0}".format(k8s_import_exception)
            )

        # Query state; populated by run() from the lookup keyword arguments.
        self.kind = None
        self.name = None
        self.namespace = None
        self.api_version = None
        self.label_selector = None
        self.field_selector = None
        self.include_uninitialized = None
        self.resource_definition = None
        self.helper = None
        self.connection = {}

    def fail(self, msg=None):
        # Lookup plugins signal failure by raising, not by exiting a module.
        raise AnsibleError(msg)

    def run(self, terms, variables=None, **kwargs):
        """Execute the lookup and return a list of object dicts."""
        self.params = kwargs
        self.client = self.get_api_client()

        # Cluster-level queries short-circuit everything else.
        cluster_info = kwargs.get('cluster_info')
        if cluster_info == 'version':
            return [self.client.version]
        if cluster_info == 'api_groups':
            api_groups = self.client.resources.api_groups
            if isinstance(api_groups, KeysView):
                return [list(api_groups)]
            return [api_groups]

        self.kind = kwargs.get('kind')
        self.name = kwargs.get('resource_name')
        self.namespace = kwargs.get('namespace')
        self.api_version = kwargs.get('api_version', 'v1')
        self.label_selector = kwargs.get('label_selector')
        self.field_selector = kwargs.get('field_selector')
        self.include_uninitialized = kwargs.get('include_uninitialized', False)

        # An explicit definition (inline or loaded from 'src') overrides the
        # individual kind/api_version/name/namespace options.
        definition = kwargs.get('resource_definition')
        if kwargs.get('src'):
            definition = self.load_resource_definitions(kwargs['src'])[0]
        if definition:
            metadata = definition.get('metadata', {})
            self.kind = definition.get('kind', self.kind)
            self.api_version = definition.get('apiVersion', self.api_version)
            self.name = metadata.get('name', self.name)
            self.namespace = metadata.get('namespace', self.namespace)

        if not self.kind:
            raise AnsibleError(
                "Error: no Kind specified. Use the 'kind' parameter, or provide an object YAML configuration "
                "using the 'resource_definition' parameter."
            )

        resource = self.find_resource(self.kind, self.api_version, fail=True)
        try:
            k8s_obj = resource.get(name=self.name, namespace=self.namespace, label_selector=self.label_selector, field_selector=self.field_selector)
        except NotFoundError:
            return []

        # A named fetch yields a single object; list queries yield items.
        if self.name:
            return [k8s_obj.to_dict()]
        return k8s_obj.to_dict().get('items')
+
+
class LookupModule(LookupBase):
    """Ansible entry point; delegates all work to KubernetesLookup."""

    def run(self, terms, variables=None, **kwargs):
        # A fresh helper per invocation keeps lookups stateless between calls.
        helper = KubernetesLookup()
        return helper.run(terms, variables=variables, **kwargs)
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/module_utils/__init__.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/module_utils/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/module_utils/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/module_utils/common.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/module_utils/common.py
new file mode 100644
index 00000000..d303eab6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/module_utils/common.py
@@ -0,0 +1,818 @@
+# Copyright 2018 Red Hat | Ansible
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import time
+import os
+import traceback
+import sys
+from datetime import datetime
+from distutils.version import LooseVersion
+
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six import iteritems, string_types
+from ansible.module_utils._text import to_native
+from ansible.module_utils.common.dict_transformations import dict_merge
+
+K8S_IMP_ERR = None
+try:
+ import kubernetes
+ import openshift
+ from openshift.dynamic import DynamicClient
+ from openshift.dynamic.exceptions import (
+ ResourceNotFoundError, ResourceNotUniqueError, NotFoundError, DynamicApiError,
+ ConflictError, ForbiddenError, MethodNotAllowedError)
+ HAS_K8S_MODULE_HELPER = True
+ k8s_import_exception = None
+except ImportError as e:
+ HAS_K8S_MODULE_HELPER = False
+ k8s_import_exception = e
+ K8S_IMP_ERR = traceback.format_exc()
+
+YAML_IMP_ERR = None
+try:
+ import yaml
+ HAS_YAML = True
+except ImportError:
+ YAML_IMP_ERR = traceback.format_exc()
+ HAS_YAML = False
+
+K8S_CONFIG_HASH_IMP_ERR = None
+try:
+ from openshift.helper.hashes import generate_hash
+ from openshift.dynamic.exceptions import KubernetesValidateMissing
+ HAS_K8S_CONFIG_HASH = True
+except ImportError:
+ K8S_CONFIG_HASH_IMP_ERR = traceback.format_exc()
+ HAS_K8S_CONFIG_HASH = False
+
+HAS_K8S_APPLY = None
+try:
+ from openshift.dynamic.apply import apply_object
+ HAS_K8S_APPLY = True
+except ImportError:
+ HAS_K8S_APPLY = False
+
+try:
+ import urllib3
+ urllib3.disable_warnings()
+except ImportError:
+ pass
+
+try:
+ from openshift.dynamic.apply import recursive_diff
+except ImportError:
+ from ansible.module_utils.common.dict_transformations import recursive_diff
+
+try:
+ try:
+ # >=0.10
+ from openshift.dynamic.resource import ResourceInstance
+ except ImportError:
+ # <0.10
+ from openshift.dynamic.client import ResourceInstance
+ HAS_K8S_INSTANCE_HELPER = True
+ k8s_import_exception = None
+except ImportError as e:
+ HAS_K8S_INSTANCE_HELPER = False
+ k8s_import_exception = e
+ K8S_IMP_ERR = traceback.format_exc()
+
+
def list_dict_str(value):
    """Validate that *value* is a list, dict, or string.

    Used as a custom ``type`` callable in the argument spec so that
    ``resource_definition`` may be supplied as inline YAML (str), a single
    object (dict), or a list of objects.

    :param value: raw module parameter value.
    :returns: *value* unchanged when it is an accepted type.
    :raises TypeError: for any other type; AnsibleModule converts this into
        a parameter-validation failure. A message is included so the user
        sees what was wrong instead of a bare TypeError.
    """
    if isinstance(value, (list, dict, string_types)):
        return value
    raise TypeError('{0} is not a valid list, dict or str'.format(type(value)))
+
+
# Argument-spec attributes that are internal bookkeeping and must be
# stripped before the spec is handed to AnsibleModule.
ARG_ATTRIBUTES_BLACKLIST = ('property_path',)

# Options shared by every object-managing module in this collection.
COMMON_ARG_SPEC = {
    'state': {
        'default': 'present',
        'choices': ['present', 'absent'],
    },
    'force': {
        'type': 'bool',
        'default': False,
    },
}

# Ways to supply the object definition itself: inline or from a file path.
RESOURCE_ARG_SPEC = {
    'resource_definition': {
        'type': list_dict_str,
        'aliases': ['definition', 'inline']
    },
    'src': {
        'type': 'path',
    },
}

# Object identity options; values in a resource definition take precedence.
NAME_ARG_SPEC = {
    'kind': {},
    'name': {},
    'namespace': {},
    'api_version': {
        'default': 'v1',
        'aliases': ['api', 'version'],
    },
}

# Cluster connection/authentication options. Each may also be provided via a
# K8S_AUTH_* environment variable (see K8sAnsibleMixin.get_api_client).
AUTH_ARG_SPEC = {
    'kubeconfig': {
        'type': 'path',
    },
    'context': {},
    'host': {},
    'api_key': {
        'no_log': True,
    },
    'username': {},
    'password': {
        'no_log': True,
    },
    'validate_certs': {
        'type': 'bool',
        'aliases': ['verify_ssl'],
    },
    'ca_cert': {
        'type': 'path',
        'aliases': ['ssl_ca_cert'],
    },
    'client_cert': {
        'type': 'path',
        'aliases': ['cert_file'],
    },
    'client_key': {
        'type': 'path',
        'aliases': ['key_file'],
    },
    'proxy': {
        'type': 'str',
    },
    'persist_config': {
        'type': 'bool',
    },
}

# Options controlling post-action waiting (see K8sAnsibleMixin.wait).
WAIT_ARG_SPEC = dict(
    wait=dict(type='bool', default=False),
    wait_sleep=dict(type='int', default=5),
    wait_timeout=dict(type='int', default=120),
    wait_condition=dict(
        type='dict',
        default=None,
        options=dict(
            type=dict(),
            status=dict(default=True, choices=[True, False, "Unknown"]),
            reason=dict()
        )
    )
)

# Map kubernetes-client parameters to ansible parameters
AUTH_ARG_MAP = {
    'kubeconfig': 'kubeconfig',
    'context': 'context',
    'host': 'host',
    'api_key': 'api_key',
    'username': 'username',
    'password': 'password',
    'verify_ssl': 'validate_certs',
    'ssl_ca_cert': 'ca_cert',
    'cert_file': 'client_cert',
    'key_file': 'client_key',
    'proxy': 'proxy',
    'persist_config': 'persist_config',
}
+
+
+class K8sAnsibleMixin(object):
+
    def __init__(self, *args, **kwargs):
        # Fail fast when the mandatory client libraries are missing.
        # fail_json is supplied by the consuming class (an AnsibleModule
        # subclass or a plugin that defines its own).
        if not HAS_K8S_MODULE_HELPER:
            self.fail_json(msg=missing_required_lib('openshift'), exception=K8S_IMP_ERR,
                           error=to_native(k8s_import_exception))
        self.openshift_version = openshift.__version__

        if not HAS_YAML:
            self.fail_json(msg=missing_required_lib("PyYAML"), exception=YAML_IMP_ERR)
+
    def get_api_client(self, **auth_params):
        """Build and return an authenticated openshift ``DynamicClient``.

        Connection options are taken from, in order of precedence: explicit
        keyword arguments, ``self.params`` (the module/plugin parameters),
        then ``K8S_AUTH_*`` environment variables.
        """
        auth_params = auth_params or getattr(self, 'params', {})
        auth = {}

        # If authorization variables aren't defined, look for them in environment variables
        for true_name, arg_name in AUTH_ARG_MAP.items():
            if auth_params.get(arg_name) is None:
                env_value = os.getenv('K8S_AUTH_{0}'.format(arg_name.upper()), None) or os.getenv('K8S_AUTH_{0}'.format(true_name.upper()), None)
                if env_value is not None:
                    # Environment variables are strings; coerce booleans by hand.
                    if AUTH_ARG_SPEC[arg_name].get('type') == 'bool':
                        env_value = env_value.lower() not in ['0', 'false', 'no']
                    auth[true_name] = env_value
            else:
                auth[true_name] = auth_params[arg_name]

        def auth_set(*names):
            # True only when every named auth option has a truthy value.
            return all([auth.get(name) for name in names])

        if auth_set('username', 'password', 'host') or auth_set('api_key', 'host'):
            # We have enough in the parameters to authenticate, no need to load incluster or kubeconfig
            pass
        elif auth_set('kubeconfig') or auth_set('context'):
            try:
                kubernetes.config.load_kube_config(auth.get('kubeconfig'), auth.get('context'), persist_config=auth.get('persist_config'))
            except Exception as err:
                self.fail(msg='Failed to load kubeconfig due to %s' % to_native(err))
        else:
            # First try to do incluster config, then kubeconfig
            try:
                kubernetes.config.load_incluster_config()
            except kubernetes.config.ConfigException:
                try:
                    kubernetes.config.load_kube_config(auth.get('kubeconfig'), auth.get('context'), persist_config=auth.get('persist_config'))
                except Exception as err:
                    self.fail(msg='Failed to load kubeconfig due to %s' % to_native(err))

        # Override any values in the default configuration with Ansible parameters
        configuration = kubernetes.client.Configuration()
        for key, value in iteritems(auth):
            if key in AUTH_ARG_MAP.keys() and value is not None:
                if key == 'api_key':
                    # The client stores API keys as a header-name -> value map.
                    setattr(configuration, key, {'authorization': "Bearer {0}".format(value)})
                else:
                    setattr(configuration, key, value)

        kubernetes.client.Configuration.set_default(configuration)
        try:
            return DynamicClient(kubernetes.client.ApiClient(configuration))
        except Exception as err:
            self.fail(msg='Failed to get client due to %s' % to_native(err))
+
+ def find_resource(self, kind, api_version, fail=False):
+ for attribute in ['kind', 'name', 'singular_name']:
+ try:
+ return self.client.resources.get(**{'api_version': api_version, attribute: kind})
+ except (ResourceNotFoundError, ResourceNotUniqueError):
+ pass
+ try:
+ return self.client.resources.get(api_version=api_version, short_names=[kind])
+ except (ResourceNotFoundError, ResourceNotUniqueError):
+ if fail:
+ self.fail(msg='Failed to find exact match for {0}.{1} by [kind, name, singularName, shortNames]'.format(api_version, kind))
+
    def kubernetes_facts(self, kind, api_version, name=None, namespace=None, label_selectors=None, field_selectors=None,
                         wait=False, wait_sleep=5, wait_timeout=120, state='present', condition=None):
        """Return ``dict(resources=[...])`` describing matching cluster objects.

        Looks up the resource type, queries it with the given name/namespace
        and selectors, and optionally waits for each returned object to
        satisfy *condition* (or the kind's readiness check). Unknown resource
        types and not-found/bad-request responses yield an empty resource
        list instead of an error.
        """
        resource = self.find_resource(kind, api_version)
        if not resource:
            # Unknown kind/api_version: report nothing rather than failing.
            return dict(resources=[])

        if not label_selectors:
            label_selectors = []
        if not field_selectors:
            field_selectors = []

        try:
            result = resource.get(name=name,
                                  namespace=namespace,
                                  label_selector=','.join(label_selectors),
                                  field_selector=','.join(field_selectors))
            if wait:
                satisfied_by = []
                if isinstance(result, ResourceInstance):
                    # We have a list of ResourceInstance
                    resource_list = result.get('items', [])
                    if not resource_list:
                        # A single object: wait on it directly.
                        resource_list = [result]

                    for resource_instance in resource_list:
                        success, res, duration = self.wait(resource, resource_instance,
                                                           sleep=wait_sleep, timeout=wait_timeout,
                                                           state=state, condition=condition)
                        if not success:
                            self.fail(msg="Failed to gather information about %s(s) even"
                                          " after waiting for %s seconds" % (res.get('kind'), duration))
                        satisfied_by.append(res)
                    return dict(resources=satisfied_by)
            result = result.to_dict()
        except (openshift.dynamic.exceptions.BadRequestError, openshift.dynamic.exceptions.NotFoundError):
            return dict(resources=[])

        # List responses carry their objects under 'items'.
        if 'items' in result:
            return dict(resources=result['items'])
        return dict(resources=[result])
+
+ def remove_aliases(self):
+ """
+ The helper doesn't know what to do with aliased keys
+ """
+ for k, v in iteritems(self.argspec):
+ if 'aliases' in v:
+ for alias in v['aliases']:
+ if alias in self.params:
+ self.params.pop(alias)
+
+ def load_resource_definitions(self, src):
+ """ Load the requested src path """
+ result = None
+ path = os.path.normpath(src)
+ if not os.path.exists(path):
+ self.fail(msg="Error accessing {0}. Does the file exist?".format(path))
+ try:
+ with open(path, 'r') as f:
+ result = list(yaml.safe_load_all(f))
+ except (IOError, yaml.YAMLError) as exc:
+ self.fail(msg="Error loading resource_definition: {0}".format(exc))
+ return result
+
+ def diff_objects(self, existing, new):
+ result = dict()
+ diff = recursive_diff(existing, new)
+ if not diff:
+ return True, result
+
+ result['before'] = diff[0]
+ result['after'] = diff[1]
+
+ # If only metadata.generation and metadata.resourceVersion changed, ignore it
+ ignored_keys = set(['generation', 'resourceVersion'])
+
+ if list(result['after'].keys()) != ['metadata'] or list(result['before'].keys()) != ['metadata']:
+ return False, result
+
+ if not set(result['after']['metadata'].keys()).issubset(ignored_keys):
+ return False, result
+ if not set(result['before']['metadata'].keys()).issubset(ignored_keys):
+ return False, result
+
+ if hasattr(self, 'warn'):
+ self.warn('No meaningful diff was generated, but the API may not be idempotent (only metadata.generation or metadata.resourceVersion were changed)')
+
+ return True, result
+
    def fail(self, msg=None):
        # Modules route failures through AnsibleModule.fail_json; plugin
        # subclasses (e.g. the lookup) override fail() to raise instead.
        self.fail_json(msg=msg)
+
    def _wait_for(self, resource, name, namespace, predicate, sleep, timeout, state):
        """Poll *resource* until *predicate* is satisfied or *timeout* elapses.

        Returns a (success, resource_dict, elapsed_seconds) tuple.
        """
        start = datetime.now()

        def _wait_for_elapsed():
            # NOTE(review): timedelta.seconds ignores the .days component, so
            # timeouts of a day or more would never trigger -- harmless for
            # the default 120-second timeout, but worth confirming upstream.
            return (datetime.now() - start).seconds

        response = None
        while _wait_for_elapsed() < timeout:
            try:
                response = resource.get(name=name, namespace=namespace)
                if predicate(response):
                    if response:
                        return True, response.to_dict(), _wait_for_elapsed()
                    return True, {}, _wait_for_elapsed()
                time.sleep(sleep)
            except NotFoundError:
                # A missing object satisfies the wait when we want it gone.
                if state == 'absent':
                    return True, {}, _wait_for_elapsed()
        if response:
            response = response.to_dict()
        return False, response, _wait_for_elapsed()
+
    def wait(self, resource, definition, sleep, timeout, state='present', condition=None):
        """Wait for *definition* to reach the desired *state*.

        Selects a predicate -- a kind-specific readiness check (Deployment,
        DaemonSet, Pod), a user-supplied status *condition*, or plain
        absence -- and polls via _wait_for(). Returns
        (success, resource_dict, elapsed_seconds).
        """

        def _deployment_ready(deployment):
            # FIXME: frustratingly bool(deployment.status) is True even if status is empty
            # Furthermore deployment.status.availableReplicas == deployment.status.replicas == None if status is empty
            # deployment.status.replicas is None is perfectly ok if desired replicas == 0
            # Scaling up means that we also need to check that we're not in a
            # situation where status.replicas == status.availableReplicas
            # but spec.replicas != status.replicas
            return (deployment.status
                    and deployment.spec.replicas == (deployment.status.replicas or 0)
                    and deployment.status.availableReplicas == deployment.status.replicas
                    and deployment.status.observedGeneration == deployment.metadata.generation
                    and not deployment.status.unavailableReplicas)

        def _pod_ready(pod):
            # A pod is ready only when every container reports ready.
            return (pod.status and pod.status.containerStatuses is not None
                    and all([container.ready for container in pod.status.containerStatuses]))

        def _daemonset_ready(daemonset):
            return (daemonset.status and daemonset.status.desiredNumberScheduled is not None
                    and daemonset.status.numberReady == daemonset.status.desiredNumberScheduled
                    and daemonset.status.observedGeneration == daemonset.metadata.generation
                    and not daemonset.status.unavailableReplicas)

        def _custom_condition(resource):
            # True when the requested condition type exists with the requested
            # status (and, when given, the requested reason).
            if not resource.status or not resource.status.conditions:
                return False
            match = [x for x in resource.status.conditions if x.type == condition['type']]
            if not match:
                return False
            # There should never be more than one condition of a specific type
            match = match[0]
            if match.status == 'Unknown':
                # 'Unknown' only matches when the caller explicitly asked for it.
                if match.status == condition['status']:
                    if 'reason' not in condition:
                        return True
                    if condition['reason']:
                        return match.reason == condition['reason']
                return False
            status = True if match.status == 'True' else False
            if status == condition['status']:
                if condition.get('reason'):
                    return match.reason == condition['reason']
                return True
            return False

        def _resource_absent(resource):
            return not resource

        waiter = dict(
            Deployment=_deployment_ready,
            DaemonSet=_daemonset_ready,
            Pod=_pod_ready
        )
        kind = definition['kind']
        if state == 'present' and not condition:
            # Kinds without a dedicated waiter are "ready" as soon as they exist.
            predicate = waiter.get(kind, lambda x: x)
        elif state == 'present' and condition:
            predicate = _custom_condition
        else:
            predicate = _resource_absent
        return self._wait_for(resource, definition['metadata']['name'], definition['metadata'].get('namespace'), predicate, sleep, timeout, state)
+
    def set_resource_definitions(self):
        """Populate self.resource_definitions from module parameters.

        Accepts an inline definition (YAML string, dict, or list of dicts),
        a file referenced by 'src', or -- when neither is given -- an
        implicit single definition built from kind/api_version/name/namespace.
        """
        resource_definition = self.params.get('resource_definition')

        self.resource_definitions = []

        if resource_definition:
            if isinstance(resource_definition, string_types):
                try:
                    self.resource_definitions = yaml.safe_load_all(resource_definition)
                except (IOError, yaml.YAMLError) as exc:
                    self.fail(msg="Error loading resource_definition: {0}".format(exc))
            elif isinstance(resource_definition, list):
                self.resource_definitions = resource_definition
            else:
                self.resource_definitions = [resource_definition]

        # 'src' takes precedence over an inline definition.
        src = self.params.get('src')
        if src:
            self.resource_definitions = self.load_resource_definitions(src)
        try:
            # Drop empty YAML documents (e.g. a trailing '---').
            self.resource_definitions = [item for item in self.resource_definitions if item]
        except AttributeError:
            pass

        if not resource_definition and not src:
            implicit_definition = dict(
                kind=self.kind,
                apiVersion=self.api_version,
                metadata=dict(name=self.name)
            )
            if self.namespace:
                implicit_definition['metadata']['namespace'] = self.namespace
            self.resource_definitions = [implicit_definition]
+
+ def check_library_version(self):
+ validate = self.params.get('validate')
+ if validate and LooseVersion(self.openshift_version) < LooseVersion("0.8.0"):
+ self.fail_json(msg="openshift >= 0.8.0 is required for validate")
+ self.append_hash = self.params.get('append_hash')
+ if self.append_hash and not HAS_K8S_CONFIG_HASH:
+ self.fail_json(msg=missing_required_lib("openshift >= 0.7.2", reason="for append_hash"),
+ exception=K8S_CONFIG_HASH_IMP_ERR)
+ if self.params['merge_type'] and LooseVersion(self.openshift_version) < LooseVersion("0.6.2"):
+ self.fail_json(msg=missing_required_lib("openshift >= 0.6.2", reason="for merge_type"))
+ self.apply = self.params.get('apply', False)
+ if self.apply and not HAS_K8S_APPLY:
+ self.fail_json(msg=missing_required_lib("openshift >= 0.9.2", reason="for apply"))
+ wait = self.params.get('wait', False)
+ if wait and not HAS_K8S_INSTANCE_HELPER:
+ self.fail_json(msg=missing_required_lib("openshift >= 0.4.0", reason="for wait"))
+
+ def flatten_list_kind(self, list_resource, definitions):
+ flattened = []
+ parent_api_version = list_resource.group_version if list_resource else None
+ parent_kind = list_resource.kind[:-4] if list_resource else None
+ for definition in definitions.get('items', []):
+ resource = self.find_resource(definition.get('kind', parent_kind), definition.get('apiVersion', parent_api_version), fail=True)
+ flattened.append((resource, self.set_defaults(resource, definition)))
+ return flattened
+
    def execute_module(self):
        """Main module entry point: act on every resource definition.

        Flattens *List kinds into their items, performs the requested action
        for each (resource, definition) pair, and exits the module with the
        accumulated results.
        """
        changed = False
        results = []
        try:
            self.client = self.get_api_client()
        # Hopefully the kubernetes client will provide its own exception class one day
        except (urllib3.exceptions.RequestError) as e:
            self.fail_json(msg="Couldn't connect to Kubernetes: %s" % str(e))

        flattened_definitions = []
        for definition in self.resource_definitions:
            if definition is None:
                continue
            kind = definition.get('kind', self.kind)
            api_version = definition.get('apiVersion', self.api_version)
            if kind and kind.endswith('List'):
                resource = self.find_resource(kind, api_version, fail=False)
                flattened_definitions.extend(self.flatten_list_kind(resource, definition))
            else:
                resource = self.find_resource(kind, api_version, fail=True)
                flattened_definitions.append((resource, definition))

        for (resource, definition) in flattened_definitions:
            kind = definition.get('kind', self.kind)
            api_version = definition.get('apiVersion', self.api_version)
            definition = self.set_defaults(resource, definition)
            self.warnings = []
            # 'validate' may be an empty dict, hence the explicit None check.
            if self.params['validate'] is not None:
                self.warnings = self.validate(definition)
            result = self.perform_action(resource, definition)
            result['warnings'] = self.warnings
            changed = changed or result['changed']
            results.append(result)

        if len(results) == 1:
            # A single definition: surface its result at the top level.
            self.exit_json(**results[0])

        self.exit_json(**{
            'changed': changed,
            'result': {
                'results': results
            }
        })
+
    def validate(self, resource):
        """Validate *resource* against the schema via the client.

        Returns a list of warning/error strings prefixed with the resource
        identity; fails the module when errors occur and
        validate.fail_on_error is set.
        """
        def _prepend_resource_info(resource, msg):
            return "%s %s: %s" % (resource['kind'], resource['metadata']['name'], msg)

        try:
            warnings, errors = self.client.validate(resource, self.params['validate'].get('version'), self.params['validate'].get('strict'))
        except KubernetesValidateMissing:
            self.fail_json(msg="kubernetes-validate python library is required to validate resources")

        if errors and self.params['validate']['fail_on_error']:
            self.fail_json(msg="\n".join([_prepend_resource_info(resource, error) for error in errors]))
        else:
            # Errors are demoted to warnings when fail_on_error is not set.
            return [_prepend_resource_info(resource, msg) for msg in warnings + errors]
+
+ def set_defaults(self, resource, definition):
+ definition['kind'] = resource.kind
+ definition['apiVersion'] = resource.group_version
+ metadata = definition.get('metadata', {})
+ if self.name and not metadata.get('name'):
+ metadata['name'] = self.name
+ if resource.namespaced and self.namespace and not metadata.get('namespace'):
+ metadata['namespace'] = self.namespace
+ definition['metadata'] = metadata
+ return definition
+
    def perform_action(self, resource, definition):
        """Reconcile *definition* against the cluster and return an Ansible result dict.

        Dispatches on the module parameters: ``state=absent`` deletes; ``apply``
        uses server-side apply semantics; otherwise the object is created,
        force-replaced, or patched depending on whether it already exists and
        whether ``force`` is set. The returned dict carries ``changed``,
        ``result``, ``method``, ``diff`` and (when waiting) ``duration``.
        """
        result = {'changed': False, 'result': {}}
        state = self.params.get('state', None)
        force = self.params.get('force', False)
        name = definition['metadata'].get('name')
        namespace = definition['metadata'].get('namespace')
        existing = None
        wait = self.params.get('wait')
        wait_sleep = self.params.get('wait_sleep')
        wait_timeout = self.params.get('wait_timeout')
        wait_condition = None
        # Only honour wait_condition when a condition 'type' was actually supplied.
        if self.params.get('wait_condition') and self.params['wait_condition'].get('type'):
            wait_condition = self.params['wait_condition']

        self.remove_aliases()

        try:
            # ignore append_hash for resources other than ConfigMap and Secret
            if self.append_hash and definition['kind'] in ['ConfigMap', 'Secret']:
                name = '%s-%s' % (name, generate_hash(definition))
                definition['metadata']['name'] = name
            params = dict(name=name)
            if namespace:
                params['namespace'] = namespace
            existing = resource.get(**params)
        except (NotFoundError, MethodNotAllowedError):
            # Remove traceback so that it doesn't show up in later failures
            try:
                sys.exc_clear()
            except AttributeError:
                # no sys.exc_clear on python3
                pass
        except ForbiddenError as exc:
            # A 403 on a Project lookup: the caller may still be allowed to create
            # the project through a ProjectRequest, so try that path instead.
            if definition['kind'] in ['Project', 'ProjectRequest'] and state != 'absent':
                return self.create_project_request(definition)
            self.fail_json(msg='Failed to retrieve requested object: {0}'.format(exc.body),
                           error=exc.status, status=exc.status, reason=exc.reason)
        except DynamicApiError as exc:
            self.fail_json(msg='Failed to retrieve requested object: {0}'.format(exc.body),
                           error=exc.status, status=exc.status, reason=exc.reason)
        except Exception as exc:
            self.fail_json(msg='Failed to retrieve requested object: {0}'.format(to_native(exc)),
                           error='', status='', reason='')

        if state == 'absent':
            result['method'] = "delete"
            if not existing:
                # The object already does not exist
                return result
            else:
                # Delete the object
                result['changed'] = True
                if not self.check_mode:
                    try:
                        k8s_obj = resource.delete(**params)
                        result['result'] = k8s_obj.to_dict()
                    except DynamicApiError as exc:
                        self.fail_json(msg="Failed to delete object: {0}".format(exc.body),
                                       error=exc.status, status=exc.status, reason=exc.reason)
                    if wait:
                        # NOTE: self.wait's second return value rebinds 'resource' here;
                        # it is not used again on this path.
                        success, resource, duration = self.wait(resource, definition, wait_sleep, wait_timeout, 'absent')
                        result['duration'] = duration
                        if not success:
                            self.fail_json(msg="Resource deletion timed out", **result)
                return result
        else:
            if self.apply:
                if self.check_mode:
                    # Compute the would-be result locally without touching the cluster.
                    ignored, patch = apply_object(resource, definition)
                    if existing:
                        k8s_obj = dict_merge(existing.to_dict(), patch)
                    else:
                        k8s_obj = patch
                else:
                    try:
                        k8s_obj = resource.apply(definition, namespace=namespace).to_dict()
                    except DynamicApiError as exc:
                        msg = "Failed to apply object: {0}".format(exc.body)
                        if self.warnings:
                            msg += "\n" + "\n    ".join(self.warnings)
                        self.fail_json(msg=msg, error=exc.status, status=exc.status, reason=exc.reason)
                success = True
                result['result'] = k8s_obj
                if wait and not self.check_mode:
                    success, result['result'], result['duration'] = self.wait(resource, definition, wait_sleep, wait_timeout, condition=wait_condition)
                if existing:
                    existing = existing.to_dict()
                else:
                    existing = {}
                match, diffs = self.diff_objects(existing, result['result'])
                result['changed'] = not match
                result['diff'] = diffs
                result['method'] = 'apply'
                if not success:
                    self.fail_json(msg="Resource apply timed out", **result)
                return result

            if not existing:
                if self.check_mode:
                    k8s_obj = definition
                else:
                    try:
                        k8s_obj = resource.create(definition, namespace=namespace).to_dict()
                    except ConflictError:
                        # Some resources, like ProjectRequests, can't be created multiple times,
                        # because the resources that they create don't match their kind
                        # In this case we'll mark it as unchanged and warn the user
                        self.warn("{0} was not found, but creating it returned a 409 Conflict error. This can happen \
                            if the resource you are creating does not directly create a resource of the same kind.".format(name))
                        return result
                    except DynamicApiError as exc:
                        msg = "Failed to create object: {0}".format(exc.body)
                        if self.warnings:
                            msg += "\n" + "\n    ".join(self.warnings)
                        self.fail_json(msg=msg, error=exc.status, status=exc.status, reason=exc.reason)
                success = True
                result['result'] = k8s_obj
                if wait and not self.check_mode:
                    success, result['result'], result['duration'] = self.wait(resource, definition, wait_sleep, wait_timeout, condition=wait_condition)
                result['changed'] = True
                result['method'] = 'create'
                if not success:
                    self.fail_json(msg="Resource creation timed out", **result)
                return result

            match = False
            diffs = []

            if existing and force:
                # Force-replace the whole object instead of patching.
                if self.check_mode:
                    k8s_obj = definition
                else:
                    try:
                        k8s_obj = resource.replace(definition, name=name, namespace=namespace, append_hash=self.append_hash).to_dict()
                    except DynamicApiError as exc:
                        msg = "Failed to replace object: {0}".format(exc.body)
                        if self.warnings:
                            msg += "\n" + "\n    ".join(self.warnings)
                        self.fail_json(msg=msg, error=exc.status, status=exc.status, reason=exc.reason)
                match, diffs = self.diff_objects(existing.to_dict(), k8s_obj)
                success = True
                result['result'] = k8s_obj
                if wait and not self.check_mode:
                    success, result['result'], result['duration'] = self.wait(resource, definition, wait_sleep, wait_timeout, condition=wait_condition)
                # Re-diff against the (possibly wait-updated) final object.
                match, diffs = self.diff_objects(existing.to_dict(), result['result'])
                result['changed'] = not match
                result['method'] = 'replace'
                result['diff'] = diffs
                if not success:
                    self.fail_json(msg="Resource replacement timed out", **result)
                return result

            # Differences exist between the existing obj and requested params
            if self.check_mode:
                k8s_obj = dict_merge(existing.to_dict(), definition)
            else:
                if LooseVersion(self.openshift_version) < LooseVersion("0.6.2"):
                    # Older openshift clients do not support selectable merge types.
                    k8s_obj, error = self.patch_resource(resource, definition, existing, name,
                                                         namespace)
                else:
                    # Try each requested merge type in order; stop on the first success.
                    for merge_type in self.params['merge_type'] or ['strategic-merge', 'merge']:
                        k8s_obj, error = self.patch_resource(resource, definition, existing, name,
                                                             namespace, merge_type=merge_type)
                        if not error:
                            break
                if error:
                    self.fail_json(**error)

            success = True
            result['result'] = k8s_obj
            if wait and not self.check_mode:
                success, result['result'], result['duration'] = self.wait(resource, definition, wait_sleep, wait_timeout, condition=wait_condition)
            match, diffs = self.diff_objects(existing.to_dict(), result['result'])
            result['changed'] = not match
            result['method'] = 'patch'
            result['diff'] = diffs

            if not success:
                self.fail_json(msg="Resource update timed out", **result)
            return result
+
+ def patch_resource(self, resource, definition, existing, name, namespace, merge_type=None):
+ try:
+ params = dict(name=name, namespace=namespace)
+ if merge_type:
+ params['content_type'] = 'application/{0}-patch+json'.format(merge_type)
+ k8s_obj = resource.patch(definition, **params).to_dict()
+ match, diffs = self.diff_objects(existing.to_dict(), k8s_obj)
+ error = {}
+ return k8s_obj, {}
+ except DynamicApiError as exc:
+ msg = "Failed to patch object: {0}".format(exc.body)
+ if self.warnings:
+ msg += "\n" + "\n ".join(self.warnings)
+ error = dict(msg=msg, error=exc.status, status=exc.status, reason=exc.reason, warnings=self.warnings)
+ return None, error
+ except Exception as exc:
+ msg = "Failed to patch object: {0}".format(exc)
+ if self.warnings:
+ msg += "\n" + "\n ".join(self.warnings)
+ error = dict(msg=msg, error=to_native(exc), status='', reason='', warnings=self.warnings)
+ return None, error
+
+ def create_project_request(self, definition):
+ definition['kind'] = 'ProjectRequest'
+ result = {'changed': False, 'result': {}}
+ resource = self.find_resource('ProjectRequest', definition['apiVersion'], fail=True)
+ if not self.check_mode:
+ try:
+ k8s_obj = resource.create(definition)
+ result['result'] = k8s_obj.to_dict()
+ except DynamicApiError as exc:
+ self.fail_json(msg="Failed to create object: {0}".format(exc.body),
+ error=exc.status, status=exc.status, reason=exc.reason)
+ result['changed'] = True
+ result['method'] = 'create'
+ return result
+
+
class KubernetesAnsibleModule(AnsibleModule, K8sAnsibleMixin):
    # NOTE: This class KubernetesAnsibleModule is deprecated in favor of
    # class K8sAnsibleMixin and will be removed 2.0.0 release.
    # Please use K8sAnsibleMixin instead.

    def __init__(self, *args, **kwargs):
        # 'argspec' is presumably supplied by a subclass or the mixin — not
        # defined on this class itself; TODO confirm against callers. It is
        # injected so AnsibleModule validates the module parameters.
        kwargs['argument_spec'] = self.argspec
        # Both bases are initialised explicitly (not via a single super() call);
        # restructuring this would change MRO-dependent behavior, so it is left as-is.
        AnsibleModule.__init__(self, *args, **kwargs)
        K8sAnsibleMixin.__init__(self, *args, **kwargs)

        self.warn("class KubernetesAnsibleModule is deprecated"
                  " and will be removed in 2.0.0. Please use K8sAnsibleMixin instead.")
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/module_utils/raw.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/module_utils/raw.py
new file mode 100644
index 00000000..a353f1cb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/module_utils/raw.py
@@ -0,0 +1,97 @@
+#
+# Copyright 2018 Red Hat | Ansible
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import copy
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.kubernetes.plugins.module_utils.common import (
+ K8sAnsibleMixin, AUTH_ARG_SPEC, COMMON_ARG_SPEC, RESOURCE_ARG_SPEC, NAME_ARG_SPEC)
+
+
class KubernetesRawModule(K8sAnsibleMixin):
    # NOTE: This class KubernetesRawModule is deprecated in favor of
    # class K8sAnsibleMixin and will be removed 2.0.0 release.
    # Please use K8sAnsibleMixin instead.

    @property
    def validate_spec(self):
        """Sub-option spec for the 'validate' module parameter."""
        return dict(
            fail_on_error=dict(type='bool'),
            version=dict(),
            strict=dict(type='bool', default=True),
        )

    @property
    def condition_spec(self):
        """Sub-option spec for the 'wait_condition' module parameter."""
        return dict(
            type=dict(),
            status=dict(default=True, choices=[True, False, "Unknown"]),
            reason=dict(),
        )

    @property
    def argspec(self):
        """Full argument spec assembled from the shared spec fragments plus raw-module extras."""
        spec = copy.deepcopy(COMMON_ARG_SPEC)
        for fragment in (NAME_ARG_SPEC, RESOURCE_ARG_SPEC, AUTH_ARG_SPEC):
            spec.update(copy.deepcopy(fragment))
        spec.update(dict(
            merge_type=dict(type='list', elements='str', choices=['json', 'merge', 'strategic-merge']),
            wait=dict(type='bool', default=False),
            wait_sleep=dict(type='int', default=5),
            wait_timeout=dict(type='int', default=120),
            wait_condition=dict(type='dict', default=None, options=self.condition_spec),
            validate=dict(type='dict', default=None, options=self.validate_spec),
            append_hash=dict(type='bool', default=False),
            apply=dict(type='bool', default=False),
        ))
        return spec

    def __init__(self, k8s_kind=None, *args, **kwargs):
        """Build the AnsibleModule, wire its plumbing onto self and load definitions."""
        exclusive = [
            ('resource_definition', 'src'),
            ('merge_type', 'apply'),
        ]

        ansible_module = AnsibleModule(
            argument_spec=self.argspec,
            mutually_exclusive=exclusive,
            supports_check_mode=True,
        )

        # Expose the AnsibleModule entry points on self so the mixin can use them.
        self.module = ansible_module
        self.check_mode = ansible_module.check_mode
        self.params = ansible_module.params
        self.fail_json = ansible_module.fail_json
        self.fail = ansible_module.fail_json
        self.exit_json = ansible_module.exit_json

        self.module.warn("class KubernetesRawModule is deprecated"
                         " and will be removed in 2.0.0. Please use K8sAnsibleMixin instead.")
        super(KubernetesRawModule, self).__init__(*args, **kwargs)

        self.client = None
        self.warnings = []

        # The kind may be forced by the caller, otherwise taken from the task params.
        self.kind = k8s_kind or self.params.get('kind')
        self.api_version = self.params.get('api_version')
        self.name = self.params.get('name')
        self.namespace = self.params.get('namespace')

        self.check_library_version()
        self.set_resource_definitions()
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/module_utils/scale.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/module_utils/scale.py
new file mode 100644
index 00000000..55bab010
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/module_utils/scale.py
@@ -0,0 +1,166 @@
+#
+# Copyright 2018 Red Hat | Ansible
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import copy
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.kubernetes.plugins.module_utils.common import (
+ AUTH_ARG_SPEC, RESOURCE_ARG_SPEC, NAME_ARG_SPEC, K8sAnsibleMixin)
+
+try:
+ from openshift.dynamic.exceptions import NotFoundError
+except ImportError:
+ pass
+
+
# Arguments specific to the scale operation; merged with the shared spec
# fragments in KubernetesAnsibleScaleModule.argspec.
SCALE_ARG_SPEC = dict(
    replicas=dict(type='int', required=True),
    current_replicas=dict(type='int'),
    resource_version=dict(),
    wait=dict(type='bool', default=True),
    wait_timeout=dict(type='int', default=20),
)
+
+
class KubernetesAnsibleScaleModule(K8sAnsibleMixin):
    """Ansible module implementation that scales an existing Kubernetes object
    via its scale subresource (or 'parallelism' for Jobs)."""

    def __init__(self, k8s_kind=None, *args, **kwargs):
        self.client = None
        self.warnings = []

        mutually_exclusive = [
            ('resource_definition', 'src'),
        ]

        module = AnsibleModule(
            argument_spec=self.argspec,
            mutually_exclusive=mutually_exclusive,
            supports_check_mode=True,
        )

        # Expose the AnsibleModule entry points on self so the mixin can use them.
        self.module = module
        self.params = self.module.params
        self.check_mode = self.module.check_mode
        self.fail_json = self.module.fail_json
        self.fail = self.module.fail_json
        self.exit_json = self.module.exit_json
        super(KubernetesAnsibleScaleModule, self).__init__()

        # 'kind' may be forced by the caller (k8s_kind) or come from the task params.
        self.kind = k8s_kind or self.params.get('kind')
        self.api_version = self.params.get('api_version')
        self.name = self.params.get('name')
        self.namespace = self.params.get('namespace')
        self.set_resource_definitions()

    def execute_module(self):
        """Run the scale operation and exit the module via exit_json/fail_json."""
        definition = self.resource_definitions[0]

        self.client = self.get_api_client()

        name = definition['metadata']['name']
        namespace = definition['metadata'].get('namespace')
        api_version = definition['apiVersion']
        kind = definition['kind']
        current_replicas = self.params.get('current_replicas')
        replicas = self.params.get('replicas')
        resource_version = self.params.get('resource_version')

        wait = self.params.get('wait')
        wait_time = self.params.get('wait_timeout')
        existing = None
        existing_count = None
        return_attributes = dict(changed=False, result=dict(), diff=dict())
        if wait:
            return_attributes['duration'] = 0

        resource = self.find_resource(kind, api_version, fail=True)

        try:
            existing = resource.get(name=name, namespace=namespace)
            return_attributes['result'] = existing.to_dict()
        except NotFoundError as exc:
            # NOTE(review): relies on exc.value.get('status') — confirm this
            # attribute exists on openshift's NotFoundError.
            self.fail_json(msg='Failed to retrieve requested object: {0}'.format(exc),
                           error=exc.value.get('status'))

        # NOTE(review): compares against lowercase 'job' while Kubernetes kinds
        # are capitalized ('Job') — confirm how self.kind is normalized upstream.
        if self.kind == 'job':
            # Jobs have no 'replicas'; parallelism is the analogous knob.
            existing_count = existing.spec.parallelism
        elif hasattr(existing.spec, 'replicas'):
            existing_count = existing.spec.replicas

        if existing_count is None:
            self.fail_json(msg='Failed to retrieve the available count for the requested object.')

        # If the caller pinned a resource_version and the object moved on, do nothing.
        if resource_version and resource_version != existing.metadata.resourceVersion:
            self.exit_json(**return_attributes)

        # If the caller pinned current_replicas and it no longer matches, do nothing.
        if current_replicas is not None and existing_count != current_replicas:
            self.exit_json(**return_attributes)

        if existing_count != replicas:
            return_attributes['changed'] = True
            if not self.check_mode:
                if self.kind == 'job':
                    existing.spec.parallelism = replicas
                    return_attributes['result'] = resource.patch(existing.to_dict()).to_dict()
                else:
                    return_attributes = self.scale(resource, existing, replicas, wait, wait_time)

        self.exit_json(**return_attributes)

    @property
    def argspec(self):
        # Scale-specific arguments layered over the shared spec fragments.
        args = copy.deepcopy(SCALE_ARG_SPEC)
        args.update(RESOURCE_ARG_SPEC)
        args.update(NAME_ARG_SPEC)
        args.update(AUTH_ARG_SPEC)
        return args

    def scale(self, resource, existing_object, replicas, wait, wait_time):
        """Patch the scale subresource to *replicas* and return a result dict."""
        name = existing_object.metadata.name
        namespace = existing_object.metadata.namespace
        kind = existing_object.kind

        if not hasattr(resource, 'scale'):
            self.fail_json(
                msg="Cannot perform scale on resource of kind {0}".format(resource.kind)
            )

        scale_obj = {'kind': kind, 'metadata': {'name': name, 'namespace': namespace}, 'spec': {'replicas': replicas}}

        # Re-fetch so the diff below reflects the pre-scale server state.
        existing = resource.get(name=name, namespace=namespace)

        try:
            resource.scale.patch(body=scale_obj)
        except Exception as exc:
            self.fail_json(msg="Scale request failed: {0}".format(exc))

        k8s_obj = resource.get(name=name, namespace=namespace).to_dict()
        match, diffs = self.diff_objects(existing.to_dict(), k8s_obj)
        result = dict()
        result['result'] = k8s_obj
        result['changed'] = not match
        result['diff'] = diffs

        if wait:
            # Fixed 5-second poll interval while waiting for the scale to settle.
            success, result['result'], result['duration'] = self.wait(resource, scale_obj, 5, wait_time)
            if not success:
                self.fail_json(msg="Resource scaling timed out", **result)
        return result
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/__init__.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/helm.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/helm.py
new file mode 100644
index 00000000..9df885c9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/helm.py
@@ -0,0 +1,573 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: helm
+
+short_description: Manages Kubernetes packages with the Helm package manager
+
+version_added: "0.11.0"
+
+author:
+ - Lucas Boisserie (@LucasBoisserie)
+ - Matthieu Diehr (@d-matt)
+
+requirements:
+ - "helm (https://github.com/helm/helm/releases)"
+ - "yaml (https://pypi.org/project/PyYAML/)"
+
+description:
+ - Install, upgrade, delete packages with the Helm package manager.
+
+options:
+ chart_ref:
+ description:
+ - chart_reference on chart repository.
+ - path to a packaged chart.
+ - path to an unpacked chart directory.
+ - absolute URL.
+ - Required when I(release_state) is set to C(present).
+ required: false
+ type: path
+ chart_repo_url:
+ description:
+ - Chart repository URL where to locate the requested chart.
+ required: false
+ type: str
+ chart_version:
+ description:
+ - Chart version to install. If this is not specified, the latest version is installed.
+ required: false
+ type: str
+ release_name:
+ description:
+ - Release name to manage.
+ required: true
+ type: str
+ aliases: [ name ]
+ release_namespace:
+ description:
+ - Kubernetes namespace where the chart should be installed.
+ required: true
+ type: str
+ aliases: [ namespace ]
+ release_state:
+ choices: ['present', 'absent']
+ description:
+      - Desired state of release.
+ required: false
+ default: present
+ aliases: [ state ]
+ type: str
+ release_values:
+ description:
+ - Value to pass to chart.
+ required: false
+ default: {}
+ aliases: [ values ]
+ type: dict
+ values_files:
+ description:
+ - Value files to pass to chart.
+ - Paths will be read from the target host's filesystem, not the host running ansible.
+ - values_files option is evaluated before values option if both are used.
+ - Paths are evaluated in the order the paths are specified.
+ required: false
+ default: []
+ type: list
+ elements: str
+ version_added: '1.1.0'
+ update_repo_cache:
+ description:
+ - Run C(helm repo update) before the operation. Can be run as part of the package installation or as a separate step.
+ default: false
+ type: bool
+
+#Helm options
+ disable_hook:
+ description:
+ - Helm option to disable hook on install/upgrade/delete.
+ default: False
+ type: bool
+ force:
+ description:
+ - Helm option to force reinstall, ignore on new install.
+ default: False
+ type: bool
+ purge:
+ description:
+ - Remove the release from the store and make its name free for later use.
+ default: True
+ type: bool
+ wait:
+ description:
+ - Wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment are in a ready state before marking the release as successful.
+ default: False
+ type: bool
+ wait_timeout:
+ description:
+ - Timeout when wait option is enabled (helm2 is a number of seconds, helm3 is a duration).
+ type: str
+ atomic:
+ description:
+ - If set, the installation process deletes the installation on failure.
+ type: bool
+ default: False
+ create_namespace:
+ description:
+ - Create the release namespace if not present.
+ type: bool
+ default: False
+ version_added: "0.11.1"
+ replace:
+ description:
+ - Reuse the given name, only if that name is a deleted release which remains in the history.
+ - This is unsafe in production environment.
+ type: bool
+ default: False
+ version_added: "1.11.0"
+extends_documentation_fragment:
+ - community.kubernetes.helm_common_options
+'''
+
+EXAMPLES = r'''
+- name: Deploy latest version of Prometheus chart inside monitoring namespace (and create it)
+ community.kubernetes.helm:
+ name: test
+ chart_ref: stable/prometheus
+ release_namespace: monitoring
+ create_namespace: true
+
+# From repository
+- name: Add stable chart repo
+ community.kubernetes.helm_repository:
+ name: stable
+ repo_url: "https://kubernetes-charts.storage.googleapis.com"
+
+- name: Deploy latest version of Grafana chart inside monitoring namespace with values
+ community.kubernetes.helm:
+ name: test
+ chart_ref: stable/grafana
+ release_namespace: monitoring
+ values:
+ replicas: 2
+
+- name: Deploy Grafana chart on 5.0.12 with values loaded from template
+ community.kubernetes.helm:
+ name: test
+ chart_ref: stable/grafana
+ chart_version: 5.0.12
+ values: "{{ lookup('template', 'somefile.yaml') | from_yaml }}"
+
+- name: Deploy Grafana chart using values files on target
+ community.kubernetes.helm:
+ name: test
+ chart_ref: stable/grafana
+ release_namespace: monitoring
+ values_files:
+ - /path/to/values.yaml
+
+- name: Remove test release and waiting suppression ending
+ community.kubernetes.helm:
+ name: test
+ state: absent
+ wait: true
+
+# From git
+- name: Git clone stable repo on HEAD
+ ansible.builtin.git:
+ repo: "http://github.com/helm/charts.git"
+ dest: /tmp/helm_repo
+
+- name: Deploy Grafana chart from local path
+ community.kubernetes.helm:
+ name: test
+ chart_ref: /tmp/helm_repo/stable/grafana
+ release_namespace: monitoring
+
+# From url
+- name: Deploy Grafana chart on 5.0.12 from url
+ community.kubernetes.helm:
+ name: test
+ chart_ref: "https://kubernetes-charts.storage.googleapis.com/grafana-5.0.12.tgz"
+ release_namespace: monitoring
+'''
+
+RETURN = r"""
+status:
+ type: complex
+ description: A dictionary of status output
+ returned: on success Creation/Upgrade/Already deploy
+ contains:
+ appversion:
+ type: str
+ returned: always
+ description: Version of app deployed
+ chart:
+ type: str
+ returned: always
+ description: Chart name and chart version
+ name:
+ type: str
+ returned: always
+ description: Name of the release
+ namespace:
+ type: str
+ returned: always
+ description: Namespace where the release is deployed
+ revision:
+ type: str
+ returned: always
+      description: Number of times the release has been updated
+ status:
+ type: str
+ returned: always
+ description: Status of release (can be DEPLOYED, FAILED, ...)
+ updated:
+ type: str
+ returned: always
+ description: The Date of last update
+ values:
+ type: str
+ returned: always
+ description: Dict of Values used to deploy
+stdout:
+ type: str
+ description: Full `helm` command stdout, in case you want to display it or examine the event log
+ returned: always
+ sample: ''
+stderr:
+ type: str
+ description: Full `helm` command stderr, in case you want to display it or examine the event log
+ returned: always
+ sample: ''
+command:
+ type: str
+ description: Full `helm` command built by this module, in case you want to re-run the command outside the module or debug a problem.
+ returned: always
+ sample: helm upgrade ...
+"""
+
+import tempfile
+import traceback
+
+try:
+ import yaml
+ IMP_YAML = True
+except ImportError:
+ IMP_YAML_ERR = traceback.format_exc()
+ IMP_YAML = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib, env_fallback
+
+module = None
+
+
def exec_command(command):
    """Execute *command* through the module runner; abort the module on failure.

    Returns the (rc, stdout, stderr) triple on success.
    """
    rc, out, err = module.run_command(command)
    if rc != 0:
        failure = "Failure when executing Helm command. Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, out, err)
        module.fail_json(msg=failure, stdout=out, stderr=err, command=command)
    return rc, out, err
+
+
def get_values(command, release_name):
    """Return the user-supplied values of the deployed *release_name* as a dict."""
    rc, out, err = exec_command(command + " get values --output=yaml " + release_name)
    # Helm 3 return "null" string when no values are set
    return {} if out.rstrip("\n") == "null" else yaml.safe_load(out)
+
+
def get_release(state, release_name):
    """Return the entry named *release_name* from a parsed `helm list` result.

    *state* is the YAML-decoded list (or None); returns None when absent.
    """
    if state is None:
        return None
    return next((entry for entry in state if entry['name'] == release_name), None)
+
+
def get_release_status(command, release_name):
    """Return the deployed release's state dict with its values attached, or None.

    None means the release is not installed.
    """
    rc, out, err = exec_command(command + " list --output=yaml --filter " + release_name)

    release = get_release(yaml.safe_load(out), release_name)
    if release is None:  # not install
        return None

    release['values'] = get_values(command, release_name)
    return release
+
+
def run_repo_update(command):
    """Refresh the local Helm repository cache (`helm repo update`)."""
    exec_command(command + " repo update")
+
+
def fetch_chart_info(command, chart_ref):
    """Return the chart metadata of *chart_ref* (`helm show chart`) parsed from YAML."""
    rc, out, err = exec_command(command + " show chart " + chart_ref)
    return yaml.safe_load(out)
+
+
def deploy(command, release_name, release_values, chart_name, wait,
           wait_timeout, disable_hook, force, values_files, atomic=False,
           create_namespace=False, replace=False):
    """Build the helm command line to install/upgrade *release_name* with *chart_name*.

    Returns the full command string; when *release_values* is non-empty the
    values are written to a temporary YAML file referenced via ``-f=``.
    """
    import os

    if replace:
        # '--replace' is not supported by 'upgrade -i'
        deploy_command = command + " install"
    else:
        deploy_command = command + " upgrade -i"  # install/upgrade

    # Always reset values to keep release_values equal to values released
    deploy_command += " --reset-values"

    if wait:
        deploy_command += " --wait"
        if wait_timeout is not None:
            deploy_command += " --timeout " + wait_timeout

    if atomic:
        deploy_command += " --atomic"

    if force:
        deploy_command += " --force"

    if replace:
        deploy_command += " --replace"

    if disable_hook:
        deploy_command += " --no-hooks"

    if create_namespace:
        deploy_command += " --create-namespace"

    if values_files:
        for value_file in values_files:
            deploy_command += " --values=" + value_file

    if release_values != {}:
        fd, path = tempfile.mkstemp(suffix='.yml')
        # Bug fix: mkstemp returns an already-open OS-level descriptor; the
        # original opened the path a second time and leaked the descriptor.
        # Wrap it with fdopen so it is closed exactly once.
        with os.fdopen(fd, 'w') as yaml_file:
            yaml.dump(release_values, yaml_file, default_flow_style=False)
        deploy_command += " -f=" + path

    deploy_command += " " + release_name + " " + chart_name

    return deploy_command
+
+
def delete(command, release_name, purge, disable_hook):
    """Build the `helm uninstall` command line for *release_name*.

    With ``purge=False`` the release history is kept (``--keep-history``);
    ``disable_hook`` adds ``--no-hooks``.
    """
    flags = ""
    if not purge:
        flags += " --keep-history"
    if disable_hook:
        flags += " --no-hooks"
    # Note: the original command keeps a double space before the release name.
    return command + " uninstall " + flags + " " + release_name
+
+
def main():
    """Module entry point: install/upgrade/uninstall a Helm release per the task params."""
    global module
    module = AnsibleModule(
        argument_spec=dict(
            binary_path=dict(type='path'),
            chart_ref=dict(type='path'),
            chart_repo_url=dict(type='str'),
            chart_version=dict(type='str'),
            release_name=dict(type='str', required=True, aliases=['name']),
            release_namespace=dict(type='str', required=True, aliases=['namespace']),
            release_state=dict(default='present', choices=['present', 'absent'], aliases=['state']),
            release_values=dict(type='dict', default={}, aliases=['values']),
            values_files=dict(type='list', default=[], elements='str'),
            update_repo_cache=dict(type='bool', default=False),

            # Helm options
            disable_hook=dict(type='bool', default=False),
            force=dict(type='bool', default=False),
            kube_context=dict(type='str', aliases=['context'], fallback=(env_fallback, ['K8S_AUTH_CONTEXT'])),
            kubeconfig_path=dict(type='path', aliases=['kubeconfig'], fallback=(env_fallback, ['K8S_AUTH_KUBECONFIG'])),
            purge=dict(type='bool', default=True),
            wait=dict(type='bool', default=False),
            wait_timeout=dict(type='str'),
            atomic=dict(type='bool', default=False),
            create_namespace=dict(type='bool', default=False),
            replace=dict(type='bool', default=False),
        ),
        required_if=[
            ('release_state', 'present', ['release_name', 'chart_ref']),
            ('release_state', 'absent', ['release_name'])
        ],
        supports_check_mode=True,
    )

    if not IMP_YAML:
        module.fail_json(msg=missing_required_lib("yaml"), exception=IMP_YAML_ERR)

    changed = False

    bin_path = module.params.get('binary_path')
    chart_ref = module.params.get('chart_ref')
    chart_repo_url = module.params.get('chart_repo_url')
    chart_version = module.params.get('chart_version')
    release_name = module.params.get('release_name')
    release_namespace = module.params.get('release_namespace')
    release_state = module.params.get('release_state')
    release_values = module.params.get('release_values')
    values_files = module.params.get('values_files')
    update_repo_cache = module.params.get('update_repo_cache')

    # Helm options
    disable_hook = module.params.get('disable_hook')
    force = module.params.get('force')
    kube_context = module.params.get('kube_context')
    kubeconfig_path = module.params.get('kubeconfig_path')
    purge = module.params.get('purge')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')
    atomic = module.params.get('atomic')
    create_namespace = module.params.get('create_namespace')
    replace = module.params.get('replace')

    if bin_path is not None:
        helm_cmd_common = bin_path
    else:
        helm_cmd_common = module.get_bin_path('helm', required=True)

    if kube_context is not None:
        helm_cmd_common += " --kube-context " + kube_context

    if kubeconfig_path is not None:
        helm_cmd_common += " --kubeconfig " + kubeconfig_path

    if update_repo_cache:
        run_repo_update(helm_cmd_common)

    helm_cmd_common += " --namespace=" + release_namespace

    # Get real/deployed release status
    release_status = get_release_status(helm_cmd_common, release_name)

    # keep helm_cmd_common for get_release_status in module_exit_json
    helm_cmd = helm_cmd_common
    if release_state == "absent" and release_status is not None:
        if replace:
            module.fail_json(msg="replace is not applicable when state is absent")

        helm_cmd = delete(helm_cmd, release_name, purge, disable_hook)
        changed = True
    elif release_state == "present":

        if chart_version is not None:
            helm_cmd += " --version=" + chart_version

        if chart_repo_url is not None:
            helm_cmd += " --repo=" + chart_repo_url

        # Fetch chart info to have real version and real name for chart_ref from archive, folder or url
        chart_info = fetch_chart_info(helm_cmd, chart_ref)

        if release_status is None:  # Not installed
            helm_cmd = deploy(helm_cmd, release_name, release_values, chart_ref, wait, wait_timeout,
                              disable_hook, False, values_files=values_files, atomic=atomic,
                              create_namespace=create_namespace, replace=replace)
            changed = True

        else:
            # the 'appVersion' specification is optional in a chart
            chart_app_version = chart_info.get('appVersion', None)
            released_app_version = release_status.get('app_version', None)

            # when deployed without an 'appVersion' chart value the 'helm list' command will return the entry `app_version: ""`
            appversion_is_same = (chart_app_version == released_app_version) or (chart_app_version is None and released_app_version == "")

            # Redeploy when forced, or when values / chart version / app version drifted.
            if force or release_values != release_status['values'] \
                    or (chart_info['name'] + '-' + chart_info['version']) != release_status["chart"] \
                    or not appversion_is_same:
                helm_cmd = deploy(helm_cmd, release_name, release_values, chart_ref, wait, wait_timeout,
                                  disable_hook, force, values_files=values_files, atomic=atomic,
                                  create_namespace=create_namespace, replace=replace)
                changed = True

    if module.check_mode:
        # Bug fix: release_status is None when the release is not installed yet;
        # guard before indexing so check mode does not crash on a fresh install.
        current_values = release_status['values'] if release_status is not None else {}
        check_status = {'values': {
            "current": current_values,
            "declared": release_values
        }}

        module.exit_json(
            changed=changed,
            command=helm_cmd,
            status=check_status,
            stdout='',
            stderr='',
        )
    elif not changed:
        module.exit_json(
            changed=False,
            status=release_status,
            stdout='',
            stderr='',
            command=helm_cmd,
        )

    rc, out, err = exec_command(helm_cmd)

    module.exit_json(
        changed=changed,
        stdout=out,
        stderr=err,
        status=get_release_status(helm_cmd_common, release_name),
        command=helm_cmd,
    )


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/helm_info.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/helm_info.py
new file mode 100644
index 00000000..03ebdde3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/helm_info.py
@@ -0,0 +1,205 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: helm_info
+
+short_description: Get information from Helm package deployed inside the cluster
+
+version_added: "0.11.0"
+
+author:
+ - Lucas Boisserie (@LucasBoisserie)
+
+requirements:
+ - "helm (https://github.com/helm/helm/releases)"
+ - "yaml (https://pypi.org/project/PyYAML/)"
+
+description:
+ - Get information (values, states, ...) from Helm package deployed inside the cluster.
+
+options:
+ release_name:
+ description:
+ - Release name to manage.
+ required: true
+ type: str
+ aliases: [ name ]
+ release_namespace:
+ description:
+ - Kubernetes namespace where the chart should be installed.
+ required: true
+ type: str
+ aliases: [ namespace ]
+extends_documentation_fragment:
+ - community.kubernetes.helm_common_options
+'''
+
+EXAMPLES = r'''
+- name: Deploy latest version of Grafana chart inside monitoring namespace
+ community.kubernetes.helm_info:
+ name: test
+ release_namespace: monitoring
+'''
+
+RETURN = r'''
+status:
+ type: complex
+ description: A dictionary of status output
+ returned: only when release exists
+ contains:
+ appversion:
+ type: str
+ returned: always
+ description: Version of app deployed
+ chart:
+ type: str
+ returned: always
+ description: Chart name and chart version
+ name:
+ type: str
+ returned: always
+ description: Name of the release
+ namespace:
+ type: str
+ returned: always
+ description: Namespace where the release is deployed
+ revision:
+ type: str
+ returned: always
+ description: Number of time where the release has been updated
+ status:
+ type: str
+ returned: always
+ description: Status of release (can be DEPLOYED, FAILED, ...)
+ updated:
+ type: str
+ returned: always
+ description: The Date of last update
+ values:
+ type: str
+ returned: always
+ description: Dict of Values used to deploy
+'''
+
+import traceback
+
+try:
+ import yaml
+ IMP_YAML = True
+except ImportError:
+ IMP_YAML_ERR = traceback.format_exc()
+ IMP_YAML = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib, env_fallback
+
+module = None
+
+
+# Get Values from deployed release
def get_values(command, release_name):
    """Return the user-supplied values of *release_name* as a dict."""
    cmd = "{0} get values --output=yaml {1}".format(command, release_name)

    rc, out, err = module.run_command(cmd)

    if rc != 0:
        module.fail_json(
            msg="Failure when executing Helm command. Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, out, err),
            command=cmd
        )

    # Helm 3 prints the literal string "null" when the release has no values.
    if out.rstrip("\n") == "null":
        return {}
    return yaml.safe_load(out)
+
+
+# Get Release from all deployed releases
def get_release(state, release_name):
    """Return the entry named *release_name* from a parsed `helm list`, or None."""
    if state is None:
        return None
    # First (and only expected) match wins; None when absent.
    return next((entry for entry in state if entry['name'] == release_name), None)
+
+
+# Get Release state from deployed release
def get_release_status(command, release_name):
    """Return the `helm list` entry for *release_name* with its values attached.

    Returns None when the release is not deployed in the target namespace.
    """
    list_command = "{0} list --output=yaml --filter {1}".format(command, release_name)

    rc, out, err = module.run_command(list_command)

    if rc != 0:
        module.fail_json(
            msg="Failure when executing Helm command. Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, out, err),
            command=list_command
        )

    release = get_release(yaml.safe_load(out), release_name)
    if release is None:
        # Release not installed.
        return None

    # Attach the user-supplied values so callers can inspect the deployment.
    release['values'] = get_values(command, release_name)
    return release
+
+
def main():
    """Query the status and values of a single Helm release (helm_info)."""
    global module

    module = AnsibleModule(
        argument_spec=dict(
            binary_path=dict(type='path'),
            release_name=dict(type='str', required=True, aliases=['name']),
            release_namespace=dict(type='str', required=True, aliases=['namespace']),

            # Helm options
            kube_context=dict(type='str', aliases=['context'], fallback=(env_fallback, ['K8S_AUTH_CONTEXT'])),
            kubeconfig_path=dict(type='path', aliases=['kubeconfig'], fallback=(env_fallback, ['K8S_AUTH_KUBECONFIG'])),
        ),
        supports_check_mode=True,
    )

    if not IMP_YAML:
        module.fail_json(msg=missing_required_lib("yaml"), exception=IMP_YAML_ERR)

    params = module.params
    release_name = params.get('release_name')

    # Resolve the helm binary: explicit path wins, otherwise search PATH.
    bin_path = params.get('binary_path')
    helm_cmd_common = bin_path if bin_path is not None else module.get_bin_path('helm', required=True)

    # Append the global helm flags in the same order as the other helm modules.
    kube_context = params.get('kube_context')
    if kube_context is not None:
        helm_cmd_common += " --kube-context " + kube_context

    kubeconfig_path = params.get('kubeconfig_path')
    if kubeconfig_path is not None:
        helm_cmd_common += " --kubeconfig " + kubeconfig_path

    helm_cmd_common += " --namespace=" + params.get('release_namespace')

    release_status = get_release_status(helm_cmd_common, release_name)

    if release_status is not None:
        module.exit_json(changed=False, status=release_status)

    # Release not found: still a success for an info module, just no status key.
    module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/helm_plugin.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/helm_plugin.py
new file mode 100644
index 00000000..e5e28a4b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/helm_plugin.py
@@ -0,0 +1,242 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: helm_plugin
+short_description: Manage Helm plugins
+version_added: "1.0.0"
+author:
+ - Abhijeet Kasurde (@Akasurde)
+requirements:
+ - "helm (https://github.com/helm/helm/releases)"
+description:
+ - Manages Helm plugins.
+options:
+ release_namespace:
+ description:
+ - Kubernetes namespace where the helm plugin should be installed.
+ required: true
+ type: str
+ aliases: [ namespace ]
+
+#Helm options
+ state:
+ description:
+ - If C(state=present) the Helm plugin will be installed.
+ - If C(state=absent) the Helm plugin will be removed.
+ choices: [ absent, present ]
+ default: present
+ type: str
+ plugin_name:
+ description:
+ - Name of Helm plugin.
+ - Required only if C(state=absent).
+ type: str
+ plugin_path:
+ description:
+ - Plugin path to a plugin on your local file system or a url of a remote VCS repo.
+ - If plugin path from file system is provided, make sure that tar is present on remote
+ machine and not on Ansible controller.
+ - Required only if C(state=present).
+ type: str
+extends_documentation_fragment:
+ - community.kubernetes.helm_common_options
+'''
+
+EXAMPLES = r'''
+- name: Install Helm env plugin
+ community.kubernetes.helm_plugin:
+ plugin_path: https://github.com/adamreese/helm-env
+ state: present
+
+- name: Install Helm plugin from local filesystem
+ community.kubernetes.helm_plugin:
+ plugin_path: https://domain/path/to/plugin.tar.gz
+ state: present
+
+- name: Remove Helm env plugin
+ community.kubernetes.helm_plugin:
+ plugin_name: env
+ state: absent
+'''
+
+RETURN = r'''
+stdout:
+ type: str
+ description: Full `helm` command stdout, in case you want to display it or examine the event log
+ returned: always
+ sample: ''
+stderr:
+ type: str
+ description: Full `helm` command stderr, in case you want to display it or examine the event log
+ returned: always
+ sample: ''
+command:
+ type: str
+ description: Full `helm` command built by this module, in case you want to re-run the command outside the module or debug a problem.
+ returned: always
+ sample: helm plugin list ...
+msg:
+ type: str
+ description: Info about successful command
+ returned: always
+ sample: "Plugin installed successfully"
+rc:
+ type: int
+ description: Helm plugin command return code
+ returned: always
+ sample: 1
+'''
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+
+
def main():
    """Install (state=present) or remove (state=absent) a Helm plugin.

    Exits via AnsibleModule.exit_json/fail_json; returns stdout/stderr/rc of
    the underlying `helm plugin ...` command plus the command string itself.
    """
    module = AnsibleModule(
        argument_spec=dict(
            binary_path=dict(type='path'),
            release_namespace=dict(type='str', required=True, aliases=['namespace']),
            state=dict(type='str', default='present', choices=['present', 'absent']),
            plugin_path=dict(type='str',),
            plugin_name=dict(type='str',),
            # Helm options
            context=dict(type='str', aliases=['kube_context'], fallback=(env_fallback, ['K8S_AUTH_CONTEXT'])),
            kubeconfig=dict(type='path', aliases=['kubeconfig_path'], fallback=(env_fallback, ['K8S_AUTH_KUBECONFIG'])),
        ),
        supports_check_mode=True,
        required_if=[
            ("state", "present", ("plugin_path",)),
            ("state", "absent", ("plugin_name",)),
        ],
        mutually_exclusive=[
            ['plugin_name', 'plugin_path'],
        ],
    )

    bin_path = module.params.get('binary_path')
    release_namespace = module.params.get('release_namespace')
    state = module.params.get('state')

    # Helm options
    kube_context = module.params.get('context')
    kubeconfig_path = module.params.get('kubeconfig')

    if bin_path is not None:
        helm_cmd_common = bin_path
    else:
        helm_cmd_common = 'helm'

    helm_cmd_common = module.get_bin_path(helm_cmd_common, required=True)

    helm_cmd_common += " plugin"

    if kube_context is not None:
        helm_cmd_common += " --kube-context " + kube_context

    if kubeconfig_path is not None:
        helm_cmd_common += " --kubeconfig " + kubeconfig_path

    helm_cmd_common += " --namespace=" + release_namespace

    if state == 'present':
        helm_cmd_common += " install %s" % module.params.get('plugin_path')
        if not module.check_mode:
            rc, out, err = module.run_command(helm_cmd_common)
        else:
            # Check mode: report success without running helm.
            rc, out, err = (0, '', '')

        if rc == 1 and 'plugin already exists' in err:
            # Idempotent no-op: helm refuses to reinstall an existing plugin.
            module.exit_json(
                failed=False,
                changed=False,
                msg="Plugin already exists",
                command=helm_cmd_common,
                stdout=out,
                stderr=err,
                rc=rc
            )
        elif rc == 0:
            module.exit_json(
                failed=False,
                changed=True,
                msg="Plugin installed successfully",
                command=helm_cmd_common,
                stdout=out,
                stderr=err,
                rc=rc,
            )
        else:
            module.fail_json(
                msg="Failure when executing Helm command.",
                command=helm_cmd_common,
                stdout=out,
                stderr=err,
                rc=rc,
            )
    elif state == 'absent':
        plugin_name = module.params.get('plugin_name')
        helm_plugin_list = helm_cmd_common + " list"
        rc, out, err = module.run_command(helm_plugin_list)
        if rc != 0 or (out == '' and err == ''):
            module.fail_json(
                msg="Failed to get Helm plugin info",
                command=helm_plugin_list,
                stdout=out,
                stderr=err,
                rc=rc,
            )

        if out:
            found = False
            for line in out.splitlines():
                # Skip the header row and any blank lines.
                if line.startswith("NAME") or not line.strip():
                    continue
                # BUG FIX: the original `name, dummy, dummy = line.split('\t', 3)`
                # raised ValueError whenever a row did not have exactly three
                # tab-separated fields (a DESCRIPTION containing a tab yields
                # four fields with maxsplit=3; a short row yields fewer).
                # Only the NAME column is needed, so take just the first field.
                name = line.split('\t', 1)[0].strip()
                if name == plugin_name:
                    found = True
                    break
            if found:
                helm_uninstall_cmd = "%s uninstall %s" % (helm_cmd_common, plugin_name)
                if not module.check_mode:
                    rc, out, err = module.run_command(helm_uninstall_cmd)
                else:
                    # Check mode: report success without running helm.
                    rc, out, err = (0, '', '')

                if rc == 0:
                    module.exit_json(
                        changed=True,
                        msg="Plugin uninstalled successfully",
                        command=helm_uninstall_cmd,
                        stdout=out,
                        stderr=err,
                        rc=rc
                    )
                module.fail_json(
                    msg="Failed to get Helm plugin uninstall",
                    command=helm_uninstall_cmd,
                    stdout=out,
                    stderr=err,
                    rc=rc,
                )
            else:
                module.exit_json(
                    failed=False,
                    changed=False,
                    msg="Plugin not found or is already uninstalled",
                    command=helm_plugin_list,
                    stdout=out,
                    stderr=err,
                    rc=rc
                )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/helm_plugin_info.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/helm_plugin_info.py
new file mode 100644
index 00000000..26664b43
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/helm_plugin_info.py
@@ -0,0 +1,167 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: helm_plugin_info
+short_description: Gather information about Helm plugins
+version_added: "1.0.0"
+author:
+ - Abhijeet Kasurde (@Akasurde)
+requirements:
+ - "helm (https://github.com/helm/helm/releases)"
+description:
+ - Gather information about Helm plugins installed in namespace.
+options:
+ release_namespace:
+ description:
+ - Kubernetes namespace where the helm plugins are installed.
+ required: true
+ type: str
+ aliases: [ namespace ]
+
+#Helm options
+ plugin_name:
+ description:
+ - Name of Helm plugin, to gather particular plugin info.
+ type: str
+extends_documentation_fragment:
+ - community.kubernetes.helm_common_options
+'''
+
+EXAMPLES = r'''
+- name: Gather Helm plugin info
+ community.kubernetes.helm_plugin_info:
+
+- name: Gather Helm env plugin info
+ community.kubernetes.helm_plugin_info:
+ plugin_name: env
+'''
+
+RETURN = r'''
+stdout:
+ type: str
+ description: Full `helm` command stdout, in case you want to display it or examine the event log
+ returned: always
+ sample: ''
+stderr:
+ type: str
+ description: Full `helm` command stderr, in case you want to display it or examine the event log
+ returned: always
+ sample: ''
+command:
+ type: str
+ description: Full `helm` command built by this module, in case you want to re-run the command outside the module or debug a problem.
+ returned: always
+ sample: helm plugin list ...
+plugin_list:
+ type: list
+ description: Helm plugin dict inside a list
+ returned: always
+ sample: {
+ "name": "env",
+ "version": "0.1.0",
+ "description": "Print out the helm environment."
+ }
+rc:
+ type: int
+ description: Helm plugin command return code
+ returned: always
+ sample: 1
+'''
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+
+
def main():
    """Gather installed Helm plugin names/versions/descriptions.

    When plugin_name is given, returns at most that one plugin; otherwise
    returns all plugins found by `helm plugin list`.
    """
    module = AnsibleModule(
        argument_spec=dict(
            binary_path=dict(type='path'),
            release_namespace=dict(type='str', required=True, aliases=['namespace']),
            plugin_name=dict(type='str',),
            # Helm options
            context=dict(type='str', aliases=['kube_context'], fallback=(env_fallback, ['K8S_AUTH_CONTEXT'])),
            kubeconfig=dict(type='path', aliases=['kubeconfig_path'], fallback=(env_fallback, ['K8S_AUTH_KUBECONFIG'])),
        ),
        supports_check_mode=True,
    )

    bin_path = module.params.get('binary_path')
    release_namespace = module.params.get('release_namespace')

    # Helm options
    kube_context = module.params.get('context')
    kubeconfig_path = module.params.get('kubeconfig')

    if bin_path is not None:
        helm_cmd_common = bin_path
    else:
        helm_cmd_common = 'helm'

    helm_cmd_common = module.get_bin_path(helm_cmd_common, required=True)

    helm_cmd_common += " plugin"

    if kube_context is not None:
        helm_cmd_common += " --kube-context " + kube_context

    if kubeconfig_path is not None:
        helm_cmd_common += " --kubeconfig " + kubeconfig_path

    helm_cmd_common += " --namespace=" + release_namespace

    plugin_name = module.params.get('plugin_name')
    helm_plugin_list = helm_cmd_common + " list"
    rc, out, err = module.run_command(helm_plugin_list)
    if rc != 0 or (out == '' and err == ''):
        module.fail_json(
            msg="Failed to get Helm plugin info",
            command=helm_plugin_list,
            stdout=out,
            stderr=err,
            rc=rc,
        )

    plugin_list = []
    if out:
        for line in out.splitlines():
            # Skip the header row and any blank lines.
            if line.startswith("NAME") or not line.strip():
                continue
            # BUG FIX: the original 3-way unpacking of line.split('\t', 3)
            # raised ValueError on rows without exactly three fields (a
            # description containing a tab produces four fields with
            # maxsplit=3; a short row produces fewer). maxsplit=2 caps the
            # split at three fields and keeps the full description; malformed
            # short rows are skipped instead of crashing the module.
            fields = line.split('\t', 2)
            if len(fields) < 3:
                continue
            name = fields[0].strip()
            version = fields[1].strip()
            description = fields[2].strip()
            if plugin_name is None:
                plugin_list.append({
                    'name': name,
                    'version': version,
                    'description': description,
                })
                continue

            if plugin_name == name:
                plugin_list.append({
                    'name': name,
                    'version': version,
                    'description': description,
                })
                break

    module.exit_json(
        # NOTE(review): info modules conventionally report changed=False;
        # kept True here to avoid altering the module's observable return
        # contract — confirm with maintainers before changing.
        changed=True,
        command=helm_plugin_list,
        stdout=out,
        stderr=err,
        rc=rc,
        plugin_list=plugin_list,
    )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/helm_repository.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/helm_repository.py
new file mode 100644
index 00000000..d8722e63
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/helm_repository.py
@@ -0,0 +1,212 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: helm_repository
+
+short_description: Manage Helm repositories.
+
+version_added: "0.11.0"
+
+author:
+ - Lucas Boisserie (@LucasBoisserie)
+
+requirements:
+ - "helm (https://github.com/helm/helm/releases)"
+ - "yaml (https://pypi.org/project/PyYAML/)"
+
+description:
+ - Manage Helm repositories.
+
+options:
+ binary_path:
+ description:
+ - The path of a helm binary to use.
+ required: false
+ type: path
+ repo_name:
+ description:
+ - Chart repository name.
+ required: true
+ type: str
+ aliases: [ name ]
+ repo_url:
+ description:
+ - Chart repository url
+ type: str
+ aliases: [ url ]
+ repo_username:
+ description:
+ - Chart repository username for repository with basic auth.
+ - Required if chart_repo_password is specified.
+ required: false
+ type: str
+ aliases: [ username ]
+ repo_password:
+ description:
+ - Chart repository password for repository with basic auth.
+ - Required if chart_repo_username is specified.
+ required: false
+ type: str
+ aliases: [ password ]
+ repo_state:
+ choices: ['present', 'absent']
+ description:
+ - Desired state of repository.
+ required: false
+ default: present
+ aliases: [ state ]
+ type: str
+'''
+
+EXAMPLES = r'''
+- name: Add default repository
+ community.kubernetes.helm_repository:
+ name: stable
+ repo_url: https://kubernetes-charts.storage.googleapis.com
+
+- name: Add Red Hat Helm charts repository
+ community.kubernetes.helm_repository:
+ name: redhat-charts
+ repo_url: https://redhat-developer.github.com/redhat-helm-charts
+'''
+
+RETURN = r''' # '''
+
+import traceback
+
+try:
+ import yaml
+ IMP_YAML = True
+except ImportError:
+ IMP_YAML_ERR = traceback.format_exc()
+ IMP_YAML = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+module = None
+
+
+# Get repository from all repositories added
def get_repository(state, repo_name):
    """Return the repo entry named *repo_name* from parsed `helm repo list` output, or None."""
    for repository in (state or []):
        if repository['name'] == repo_name:
            return repository
    return None
+
+
+# Get repository status
def get_repository_status(command, repository_name):
    """Return the current repo entry for *repository_name*, or None when absent."""
    list_command = command + " repo list --output=yaml"

    rc, out, err = module.run_command(list_command)

    # Helm exits 1 with 'no repositories to show' when the repo list is empty;
    # that is an expected "absent" state, not an error.
    if rc == 1 and "no repositories to show" in err:
        return None
    if rc != 0:
        module.fail_json(
            msg="Failure when executing Helm command. Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, out, err),
            command=list_command
        )

    return get_repository(yaml.safe_load(out), repository_name)
+
+
+# Install repository
def install_repository(command, repository_name, repository_url, repository_username, repository_password):
    """Build (but do not run) the `helm repo add` command string."""
    cmd = " ".join([command, "repo add", repository_name, repository_url])

    # Basic-auth credentials are only appended when both halves are present
    # (the module enforces required_together on them).
    if repository_username is not None and repository_password is not None:
        cmd += " --username=" + repository_username + " --password=" + repository_password

    return cmd
+
+
+# Delete repository
def delete_repository(command, repository_name):
    """Build (but do not run) the `helm repo rm` command string."""
    return "{0} repo rm {1}".format(command, repository_name)
+
+
def main():
    """Add or remove a Helm chart repository (helm_repository).

    Builds the `helm repo add`/`helm repo rm` command from the module
    parameters, honours check mode, and masks the repo password before the
    command string is returned in the result.
    """
    global module

    module = AnsibleModule(
        argument_spec=dict(
            binary_path=dict(type='path'),
            repo_name=dict(type='str', aliases=['name'], required=True),
            repo_url=dict(type='str', aliases=['url']),
            repo_username=dict(type='str', aliases=['username']),
            repo_password=dict(type='str', aliases=['password'], no_log=True),
            repo_state=dict(default='present', choices=['present', 'absent'], aliases=['state']),
        ),
        required_together=[
            ['repo_username', 'repo_password']
        ],
        required_if=[
            ('repo_state', 'present', ['repo_url']),
        ],
        supports_check_mode=True,
    )

    if not IMP_YAML:
        module.fail_json(msg=missing_required_lib("yaml"), exception=IMP_YAML_ERR)

    changed = False

    bin_path = module.params.get('binary_path')
    repo_name = module.params.get('repo_name')
    repo_url = module.params.get('repo_url')
    repo_username = module.params.get('repo_username')
    repo_password = module.params.get('repo_password')
    repo_state = module.params.get('repo_state')

    if bin_path is not None:
        helm_cmd = bin_path
    else:
        helm_cmd = module.get_bin_path('helm', required=True)

    repository_status = get_repository_status(helm_cmd, repo_name)

    if repo_state == "absent" and repository_status is not None:
        helm_cmd = delete_repository(helm_cmd, repo_name)
        changed = True
    elif repo_state == "present":
        if repository_status is None:
            helm_cmd = install_repository(helm_cmd, repo_name, repo_url, repo_username, repo_password)
            changed = True
        elif repository_status['url'] != repo_url:
            # BUG FIX: the original message ("Repository already have a repository
            # named {0}") was ungrammatical and did not describe the actual
            # conflict: the name is already taken by a repo with a different URL.
            module.fail_json(msg="A repository named {0} already exists with a different URL ({1})".format(repo_name, repository_status['url']))

    if module.check_mode:
        module.exit_json(changed=changed)
    elif not changed:
        # Nothing to do; report the current repo identity for convenience.
        module.exit_json(changed=False, repo_name=repo_name, repo_url=repo_url)

    rc, out, err = module.run_command(helm_cmd)

    # Mask the password before the command string can appear in results/logs.
    if repo_password is not None:
        helm_cmd = helm_cmd.replace(repo_password, '******')

    if rc != 0:
        module.fail_json(
            msg="Failure when executing Helm command. Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, out, err),
            command=helm_cmd
        )

    module.exit_json(changed=changed, stdout=out, stderr=err, command=helm_cmd)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s.py
new file mode 100644
index 00000000..18e33dfe
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s.py
@@ -0,0 +1,320 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Chris Houseknecht <@chouseknecht>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+
+module: k8s
+
+short_description: Manage Kubernetes (K8s) objects
+
+author:
+ - "Chris Houseknecht (@chouseknecht)"
+ - "Fabian von Feilitzsch (@fabianvf)"
+
+description:
+ - Use the OpenShift Python client to perform CRUD operations on K8s objects.
+ - Pass the object definition from a source file or inline. See examples for reading
+ files and using Jinja templates or vault-encrypted files.
+ - Access to the full range of K8s APIs.
+ - Use the M(community.kubernetes.k8s_info) module to obtain a list of items about an object of type C(kind)
+ - Authenticate using either a config file, certificates, password or token.
+ - Supports check mode.
+
+extends_documentation_fragment:
+ - community.kubernetes.k8s_state_options
+ - community.kubernetes.k8s_name_options
+ - community.kubernetes.k8s_resource_options
+ - community.kubernetes.k8s_auth_options
+ - community.kubernetes.k8s_wait_options
+
+notes:
+ - If your OpenShift Python library is not 0.9.0 or newer and you are trying to
+ remove an item from an associative array/dictionary, for example a label or
+ an annotation, you will need to explicitly set the value of the item to be
+ removed to `null`. Simply deleting the entry in the dictionary will not
+ remove it from openshift or kubernetes.
+
+options:
+ merge_type:
+ description:
+ - Whether to override the default patch merge approach with a specific type. By default, the strategic
+ merge will typically be used.
+ - For example, Custom Resource Definitions typically aren't updatable by the usual strategic merge. You may
+ want to use C(merge) if you see "strategic merge patch format is not supported"
+ - See U(https://kubernetes.io/docs/tasks/run-application/update-api-object-kubectl-patch/#use-a-json-merge-patch-to-update-a-deployment)
+ - Requires openshift >= 0.6.2
+ - If more than one merge_type is given, the merge_types will be tried in order
+ - If openshift >= 0.6.2, this defaults to C(['strategic-merge', 'merge']), which is ideal for using the same parameters
+ on resource kinds that combine Custom Resources and built-in resources. For openshift < 0.6.2, the default
+ is simply C(strategic-merge).
+ - mutually exclusive with C(apply)
+ choices:
+ - json
+ - merge
+ - strategic-merge
+ type: list
+ elements: str
+ validate:
+ description:
+ - how (if at all) to validate the resource definition against the kubernetes schema.
+ Requires the kubernetes-validate python module and openshift >= 0.8.0
+ suboptions:
+ fail_on_error:
+ description: whether to fail on validation errors.
+ type: bool
+ version:
+ description: version of Kubernetes to validate against. defaults to Kubernetes server version
+ type: str
+ strict:
+ description: whether to fail when passing unexpected properties
+ default: True
+ type: bool
+ type: dict
+ append_hash:
+ description:
+ - Whether to append a hash to a resource name for immutability purposes
+ - Applies only to ConfigMap and Secret resources
+ - The parameter will be silently ignored for other resource kinds
+ - The full definition of an object is needed to generate the hash - this means that deleting an object created with append_hash
+ will only work if the same object is passed with state=absent (alternatively, just use state=absent with the name including
+ the generated hash and append_hash=no)
+ - Requires openshift >= 0.7.2
+ type: bool
+ apply:
+ description:
+ - C(apply) compares the desired resource definition with the previously supplied resource definition,
+ ignoring properties that are automatically generated
+ - C(apply) works better with Services than 'force=yes'
+ - Requires openshift >= 0.9.2
+ - mutually exclusive with C(merge_type)
+ type: bool
+ template:
+ description:
+ - Provide a valid YAML template definition file for an object when creating or updating.
+ - Value can be provided as string or dictionary.
+ - Mutually exclusive with C(src) and C(resource_definition).
+ - Template files needs to be present on the Ansible Controller's file system.
+ - Additional parameters can be specified using dictionary.
+ - 'Valid additional parameters - '
+ - 'C(newline_sequence) (str): Specify the newline sequence to use for templating files.
+ valid choices are "\n", "\r", "\r\n". Default value "\n".'
+ - 'C(block_start_string) (str): The string marking the beginning of a block.
+ Default value "{%".'
+ - 'C(block_end_string) (str): The string marking the end of a block.
+ Default value "%}".'
+ - 'C(variable_start_string) (str): The string marking the beginning of a print statement.
+ Default value "{{".'
+ - 'C(variable_end_string) (str): The string marking the end of a print statement.
+ Default value "}}".'
+ - 'C(trim_blocks) (bool): Determine when newlines should be removed from blocks. When set to C(yes) the first newline
+ after a block is removed (block, not variable tag!). Default value is true.'
+ - 'C(lstrip_blocks) (bool): Determine when leading spaces and tabs should be stripped.
+ When set to C(yes) leading spaces and tabs are stripped from the start of a line to a block.
+ This functionality requires Jinja 2.7 or newer. Default value is false.'
+ type: raw
+
+requirements:
+ - "python >= 2.7"
+ - "openshift >= 0.6"
+ - "PyYAML >= 3.11"
+'''
+
+EXAMPLES = r'''
+- name: Create a k8s namespace
+ community.kubernetes.k8s:
+ name: testing
+ api_version: v1
+ kind: Namespace
+ state: present
+
+- name: Create a Service object from an inline definition
+ community.kubernetes.k8s:
+ state: present
+ definition:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: web
+ namespace: testing
+ labels:
+ app: galaxy
+ service: web
+ spec:
+ selector:
+ app: galaxy
+ service: web
+ ports:
+ - protocol: TCP
+ targetPort: 8000
+ name: port-8000-tcp
+ port: 8000
+
+- name: Remove an existing Service object
+ community.kubernetes.k8s:
+ state: absent
+ api_version: v1
+ kind: Service
+ namespace: testing
+ name: web
+
+# Passing the object definition from a file
+
+- name: Create a Deployment by reading the definition from a local file
+ community.kubernetes.k8s:
+ state: present
+ src: /testing/deployment.yml
+
+- name: >-
+ Read definition file from the Ansible controller file system.
+ If the definition file has been encrypted with Ansible Vault it will automatically be decrypted.
+ community.kubernetes.k8s:
+ state: present
+ definition: "{{ lookup('file', '/testing/deployment.yml') | from_yaml }}"
+
+- name: Read definition template file from the Ansible controller file system
+ community.kubernetes.k8s:
+ state: present
+ template: '/testing/deployment.j2'
+
+- name: Read definition template file from the Ansible controller file system that uses custom start/end strings
+ community.kubernetes.k8s:
+ state: present
+ template:
+ path: '/testing/deployment.j2'
+ variable_start_string: '[['
+ variable_end_string: ']]'
+
+- name: fail on validation errors
+ community.kubernetes.k8s:
+ state: present
+ definition: "{{ lookup('template', '/testing/deployment.yml') | from_yaml }}"
+ validate:
+ fail_on_error: yes
+
+- name: warn on validation errors, check for unexpected properties
+ community.kubernetes.k8s:
+ state: present
+ definition: "{{ lookup('template', '/testing/deployment.yml') | from_yaml }}"
+ validate:
+ fail_on_error: no
+ strict: yes
+'''
+
+RETURN = r'''
+result:
+ description:
+ - The created, patched, or otherwise present object. Will be empty in the case of a deletion.
+ returned: success
+ type: complex
+ contains:
+ api_version:
+ description: The versioned schema of this representation of an object.
+ returned: success
+ type: str
+ kind:
+ description: Represents the REST resource this object represents.
+ returned: success
+ type: str
+ metadata:
+ description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
+ returned: success
+ type: complex
+ spec:
+ description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
+ returned: success
+ type: complex
+ status:
+ description: Current status details for the object.
+ returned: success
+ type: complex
+ items:
+ description: Returned only when multiple yaml documents are passed to src or resource_definition
+ returned: when resource_definition or src contains list of objects
+ type: list
+ duration:
+ description: elapsed time of task in seconds
+ returned: when C(wait) is true
+ type: int
+ sample: 48
+'''
+
+import copy
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.kubernetes.plugins.module_utils.common import (
+ K8sAnsibleMixin, COMMON_ARG_SPEC, NAME_ARG_SPEC, RESOURCE_ARG_SPEC, AUTH_ARG_SPEC, WAIT_ARG_SPEC)
+
+
class KubernetesModule(K8sAnsibleMixin):
    """AnsibleModule wrapper that wires the k8s argument spec into K8sAnsibleMixin.

    The mixin (imported from module_utils.common) implements the actual CRUD
    logic via execute_module(); this class only assembles the argument spec,
    declares mutually exclusive options, and exposes the AnsibleModule
    plumbing (params, fail_json, exit_json, ...) that the mixin expects.
    """

    @property
    def validate_spec(self):
        """Sub-spec for the nested 'validate' option (schema-validation knobs)."""
        return dict(
            fail_on_error=dict(type='bool'),
            version=dict(),
            strict=dict(type='bool', default=True)
        )

    @property
    def argspec(self):
        """Full argument spec: shared doc-fragment specs plus k8s-only options.

        Each shared spec is deep-copied so the module never mutates the
        module_utils-level dicts in place.
        """
        argument_spec = copy.deepcopy(COMMON_ARG_SPEC)
        argument_spec.update(copy.deepcopy(NAME_ARG_SPEC))
        argument_spec.update(copy.deepcopy(RESOURCE_ARG_SPEC))
        argument_spec.update(copy.deepcopy(AUTH_ARG_SPEC))
        argument_spec.update(copy.deepcopy(WAIT_ARG_SPEC))
        argument_spec['merge_type'] = dict(type='list', elements='str', choices=['json', 'merge', 'strategic-merge'])
        argument_spec['validate'] = dict(type='dict', default=None, options=self.validate_spec)
        argument_spec['append_hash'] = dict(type='bool', default=False)
        argument_spec['apply'] = dict(type='bool', default=False)
        argument_spec['template'] = dict(type='raw', default=None)
        return argument_spec

    def __init__(self, k8s_kind=None, *args, **kwargs):
        """Build the AnsibleModule, mirror its API onto self, and prime the mixin.

        :param k8s_kind: optional resource kind forced by thin wrapper modules;
            when None the 'kind' module parameter is used instead.
        """
        mutually_exclusive = [
            ('resource_definition', 'src'),
            ('merge_type', 'apply'),
            ('template', 'resource_definition'),
            ('template', 'src'),
        ]

        module = AnsibleModule(
            argument_spec=self.argspec,
            mutually_exclusive=mutually_exclusive,
            supports_check_mode=True,
        )

        # Expose the AnsibleModule surface that K8sAnsibleMixin calls directly.
        self.module = module
        self.check_mode = self.module.check_mode
        self.params = self.module.params
        self.fail_json = self.module.fail_json
        self.fail = self.module.fail_json
        self.exit_json = self.module.exit_json

        super(KubernetesModule, self).__init__(*args, **kwargs)

        # Populated later by the mixin (client on connect, warnings as found).
        self.client = None
        self.warnings = []

        # Target object coordinates; kind may be pinned via the k8s_kind argument.
        self.kind = k8s_kind or self.params.get('kind')
        self.api_version = self.params.get('api_version')
        self.name = self.params.get('name')
        self.namespace = self.params.get('namespace')

        self.check_library_version()
        self.set_resource_definitions()
+
+
def main():
    """Entry point: build the k8s module wrapper and run its CRUD logic."""
    k8s = KubernetesModule()
    k8s.execute_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_auth.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_auth.py
new file mode 100644
index 00000000..3af297ba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_auth.py
@@ -0,0 +1,336 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, KubeVirt Team <@kubevirt>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+
+module: k8s_auth
+
+short_description: Authenticate to Kubernetes clusters which require an explicit login step
+
+author: KubeVirt Team (@kubevirt)
+
+description:
+ - "This module handles authenticating to Kubernetes clusters requiring I(explicit) authentication procedures,
+ meaning ones where a client logs in (obtains an authentication token), performs API operations using said
+ token and then logs out (revokes the token). An example of a Kubernetes distribution requiring this module
+ is OpenShift."
+ - "On the other hand a popular configuration for username+password authentication is one utilizing HTTP Basic
+ Auth, which does not involve any additional login/logout steps (instead login credentials can be attached
+ to each and every API call performed) and as such is handled directly by the C(k8s) module (and other
+      resource-specific modules) by utilizing the C(host), C(username) and C(password) parameters. Please
+ consult your preferred module's documentation for more details."
+
+options:
+ state:
+ description:
+ - If set to I(present) connect to the API server using the URL specified in C(host) and attempt to log in.
+ - If set to I(absent) attempt to log out by revoking the authentication token specified in C(api_key).
+ default: present
+ choices:
+ - present
+ - absent
+ type: str
+ host:
+ description:
+ - Provide a URL for accessing the API server.
+ required: true
+ type: str
+ username:
+ description:
+ - Provide a username for authenticating with the API server.
+ type: str
+ password:
+ description:
+ - Provide a password for authenticating with the API server.
+ type: str
+ ca_cert:
+ description:
+ - "Path to a CA certificate file used to verify connection to the API server. The full certificate chain
+ must be provided to avoid certificate validation errors."
+ aliases: [ ssl_ca_cert ]
+ type: path
+ validate_certs:
+ description:
+ - "Whether or not to verify the API server's SSL certificates."
+ type: bool
+ default: true
+ aliases: [ verify_ssl ]
+ api_key:
+ description:
+ - When C(state) is set to I(absent), this specifies the token to revoke.
+ type: str
+
+requirements:
+ - python >= 2.7
+ - urllib3
+ - requests
+ - requests-oauthlib
+'''
+
+EXAMPLES = r'''
+- hosts: localhost
+ module_defaults:
+ group/k8s:
+ host: https://k8s.example.com/
+ ca_cert: ca.pem
+ tasks:
+ - block:
+ # It's good practice to store login credentials in a secure vault and not
+ # directly in playbooks.
+ - include_vars: k8s_passwords.yml
+
+ - name: Log in (obtain access token)
+ community.kubernetes.k8s_auth:
+ username: admin
+ password: "{{ k8s_admin_password }}"
+ register: k8s_auth_results
+
+ # Previous task provides the token/api_key, while all other parameters
+ # are taken from module_defaults
+ - name: Get a list of all pods from any namespace
+ community.kubernetes.k8s_info:
+ api_key: "{{ k8s_auth_results.k8s_auth.api_key }}"
+ kind: Pod
+ register: pod_list
+
+ always:
+ - name: If login succeeded, try to log out (revoke access token)
+ when: k8s_auth_results.k8s_auth.api_key is defined
+ community.kubernetes.k8s_auth:
+ state: absent
+ api_key: "{{ k8s_auth_results.k8s_auth.api_key }}"
+'''
+
+# Returned value names need to match k8s modules parameter names, to make it
+# easy to pass returned values of k8s_auth to other k8s modules.
+# Discussion: https://github.com/ansible/ansible/pull/50807#discussion_r248827899
+RETURN = r'''
+k8s_auth:
+ description: Kubernetes authentication facts.
+ returned: success
+ type: complex
+ contains:
+ api_key:
+ description: Authentication token.
+ returned: success
+ type: str
+ host:
+ description: URL for accessing the API server.
+ returned: success
+ type: str
+ ca_cert:
+ description: Path to a CA certificate file used to verify connection to the API server.
+ returned: success
+ type: str
+ validate_certs:
+ description: "Whether or not to verify the API server's SSL certificates."
+ returned: success
+ type: bool
+ username:
+ description: Username for authenticating with the API server.
+ returned: success
+ type: str
+'''
+
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib_parse import urlparse, parse_qs, urlencode
+
+# 3rd party imports
+try:
+ import requests
+ HAS_REQUESTS = True
+except ImportError:
+ HAS_REQUESTS = False
+
+try:
+ from requests_oauthlib import OAuth2Session
+ HAS_REQUESTS_OAUTH = True
+except ImportError:
+ HAS_REQUESTS_OAUTH = False
+
+try:
+ from urllib3.util import make_headers
+ HAS_URLLIB3 = True
+except ImportError:
+ HAS_URLLIB3 = False
+
+
# Argument spec for the k8s_auth module.  no_log on the secret-bearing
# options keeps credentials and tokens out of logs and invocation output.
K8S_AUTH_ARG_SPEC = dict(
    state=dict(default='present', choices=['present', 'absent']),
    host=dict(required=True),
    username=dict(),
    password=dict(no_log=True),
    ca_cert=dict(type='path', aliases=['ssl_ca_cert']),
    validate_certs=dict(type='bool', default=True, aliases=['verify_ssl']),
    api_key=dict(no_log=True),
)
+
+
class KubernetesAuthModule(AnsibleModule):
    """Explicit login/logout against an OpenShift-style OAuth server.

    state=present runs the OAuth 2 'challenging client' flow to obtain an
    access token; state=absent revokes a previously obtained token.
    """

    def __init__(self):
        AnsibleModule.__init__(
            self,
            argument_spec=K8S_AUTH_ARG_SPEC,
            required_if=[
                ('state', 'present', ['username', 'password']),
                ('state', 'absent', ['api_key']),
            ]
        )

        # Fail early with an actionable message if any of the third-party
        # HTTP libraries this module needs is missing.
        if not HAS_REQUESTS:
            self.fail("This module requires the python 'requests' package. Try `pip install requests`.")

        if not HAS_REQUESTS_OAUTH:
            self.fail("This module requires the python 'requests-oauthlib' package. Try `pip install requests-oauthlib`.")

        if not HAS_URLLIB3:
            self.fail("This module requires the python 'urllib3' package. Try `pip install urllib3`.")

    def execute_module(self):
        """Dispatch on ``state`` and exit with the resulting auth facts."""
        state = self.params.get('state')
        verify_ssl = self.params.get('validate_certs')
        ssl_ca_cert = self.params.get('ca_cert')

        self.auth_username = self.params.get('username')
        self.auth_password = self.params.get('password')
        self.auth_api_key = self.params.get('api_key')
        self.con_host = self.params.get('host')

        # python-requests takes either a bool or a path to a ca file as the 'verify' param
        if verify_ssl and ssl_ca_cert:
            self.con_verify_ca = ssl_ca_cert  # path
        else:
            self.con_verify_ca = verify_ssl  # bool

        # Get needed info to access authorization APIs
        self.openshift_discover()

        if state == 'present':
            new_api_key = self.openshift_login()
            # Returned keys deliberately mirror the k8s modules' parameter
            # names so this result can be fed straight into them.
            result = dict(
                host=self.con_host,
                validate_certs=verify_ssl,
                ca_cert=ssl_ca_cert,
                api_key=new_api_key,
                username=self.auth_username,
            )
        else:
            self.openshift_logout()
            # Nothing useful to report after revoking a token.
            result = dict()

        self.exit_json(changed=False, k8s_auth=result)

    def openshift_discover(self):
        """Discover the OAuth authorization/token endpoints from the
        server's well-known metadata URL; fail the module on any error."""
        url = '{0}/.well-known/oauth-authorization-server'.format(self.con_host)
        ret = requests.get(url, verify=self.con_verify_ca)

        if ret.status_code != 200:
            self.fail_request("Couldn't find OpenShift's OAuth API", method='GET', url=url,
                              reason=ret.reason, status_code=ret.status_code)

        try:
            oauth_info = ret.json()

            self.openshift_auth_endpoint = oauth_info['authorization_endpoint']
            self.openshift_token_endpoint = oauth_info['token_endpoint']
        except Exception:
            # Covers both JSON decoding errors and missing endpoint keys.
            self.fail_json(msg="Something went wrong discovering OpenShift OAuth details.",
                           exception=traceback.format_exc())

    def openshift_login(self):
        """Run the 'openshift-challenging-client' OAuth flow and return the
        access token granted by the token endpoint."""
        os_oauth = OAuth2Session(client_id='openshift-challenging-client')
        authorization_url, state = os_oauth.authorization_url(self.openshift_auth_endpoint,
                                                              state="1", code_challenge_method='S256')
        auth_headers = make_headers(basic_auth='{0}:{1}'.format(self.auth_username, self.auth_password))

        # Request authorization code using basic auth credentials
        ret = os_oauth.get(
            authorization_url,
            headers={'X-Csrf-Token': state, 'authorization': auth_headers.get('authorization')},
            verify=self.con_verify_ca,
            allow_redirects=False
        )

        # A successful challenge answers with a redirect (302) carrying the
        # authorization code in its Location header.
        if ret.status_code != 302:
            self.fail_request("Authorization failed.", method='GET', url=authorization_url,
                              reason=ret.reason, status_code=ret.status_code)

        # In here we have `code` and `state`, I think `code` is the important one
        qwargs = {}
        for k, v in parse_qs(urlparse(ret.headers['Location']).query).items():
            qwargs[k] = v[0]
        qwargs['grant_type'] = 'authorization_code'

        # Using authorization code given to us in the Location header of the previous request, request a token
        ret = os_oauth.post(
            self.openshift_token_endpoint,
            headers={
                'Accept': 'application/json',
                'Content-Type': 'application/x-www-form-urlencoded',
                # This is just base64 encoded 'openshift-challenging-client:'
                'Authorization': 'Basic b3BlbnNoaWZ0LWNoYWxsZW5naW5nLWNsaWVudDo='
            },
            data=urlencode(qwargs),
            verify=self.con_verify_ca
        )

        if ret.status_code != 200:
            self.fail_request("Failed to obtain an authorization token.", method='POST',
                              url=self.openshift_token_endpoint,
                              reason=ret.reason, status_code=ret.status_code)

        return ret.json()['access_token']

    def openshift_logout(self):
        """Best-effort revocation of ``api_key`` via OpenShift's
        oauthaccesstokens API; failures are deliberately ignored."""
        url = '{0}/apis/oauth.openshift.io/v1/oauthaccesstokens/{1}'.format(self.con_host, self.auth_api_key)
        headers = {
            'Accept': 'application/json',
            'Content-Type': 'application/json',
            'Authorization': 'Bearer {0}'.format(self.auth_api_key)
        }
        json = {
            "apiVersion": "oauth.openshift.io/v1",
            "kind": "DeleteOptions"
        }

        requests.delete(url, headers=headers, json=json, verify=self.con_verify_ca)
        # Ignore errors, the token will time out eventually anyway

    def fail(self, msg=None):
        """Thin convenience wrapper around fail_json."""
        self.fail_json(msg=msg)

    def fail_request(self, msg, **kwargs):
        """Fail with HTTP request context; each detail is prefixed with
        ``req_`` so it cannot clash with standard result keys."""
        req_info = {}
        for k, v in kwargs.items():
            req_info['req_' + k] = v
        self.fail_json(msg=msg, **req_info)
+
+
def main():
    """Entry point: run the auth module, turning any unexpected exception
    into a proper Ansible failure with a traceback."""
    module = KubernetesAuthModule()
    try:
        module.execute_module()
    except Exception as exc:
        module.fail_json(msg=str(exc), exception=traceback.format_exc())


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_cluster_info.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_cluster_info.py
new file mode 100644
index 00000000..e01009d6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_cluster_info.py
@@ -0,0 +1,247 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020, Abhijeet Kasurde
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: k8s_cluster_info
+
+version_added: "0.11.1"
+
+short_description: Describe Kubernetes (K8s) cluster, APIs available and their respective versions
+
+author:
+ - Abhijeet Kasurde (@Akasurde)
+
+description:
+ - Use the OpenShift Python client to perform read operations on K8s objects.
+ - Authenticate using either a config file, certificates, password or token.
+ - Supports check mode.
+
+options:
+ invalidate_cache:
+ description:
+ - Invalidate cache before retrieving information about cluster.
+ type: bool
+ default: True
+
+extends_documentation_fragment:
+ - community.kubernetes.k8s_auth_options
+
+requirements:
+ - "python >= 2.7"
+ - "openshift >= 0.6"
+ - "PyYAML >= 3.11"
+'''
+
+EXAMPLES = r'''
+- name: Get Cluster information
+ community.kubernetes.k8s_cluster_info:
+ register: api_status
+
+- name: Do not invalidate cache before getting information
+ community.kubernetes.k8s_cluster_info:
+ invalidate_cache: False
+ register: api_status
+'''
+
+RETURN = r'''
+connection:
+ description:
+ - Connection information
+ returned: success
+ type: dict
+ contains:
+ cert_file:
+ description:
+ - Path to client certificate.
+ type: str
+ returned: success
+ host:
+ description:
+ - Host URL
+ type: str
+ returned: success
+ password:
+ description:
+ - User password
+ type: str
+ returned: success
+ proxy:
+ description:
+ - Proxy details
+ type: str
+ returned: success
+ ssl_ca_cert:
+ description:
+ - Path to CA certificate
+ type: str
+ returned: success
+ username:
+ description:
+ - Username
+ type: str
+ returned: success
+ verify_ssl:
+ description:
+ - SSL verification status
+ type: bool
+ returned: success
+version:
+ description:
+ - Information about server and client version
+ returned: success
+ type: dict
+ contains:
+ server:
+ description: Server version
+ returned: success
+ type: dict
+ client:
+ description: Client version
+ returned: success
+ type: str
+apis:
+ description:
+    - The API(s) that exist in the cluster, returned as a dictionary
+ returned: success
+ type: dict
+ contains:
+ api_version:
+ description: API version
+ returned: success
+ type: str
+ categories:
+ description: API categories
+ returned: success
+ type: list
+ group_version:
+ description: Resource Group version
+ returned: success
+ type: str
+ kind:
+ description: Resource kind
+ returned: success
+ type: str
+ name:
+ description: Resource short name
+ returned: success
+ type: str
+ namespaced:
+ description: If resource is namespaced
+ returned: success
+ type: bool
+ preferred:
+ description: If resource version preferred
+ returned: success
+ type: bool
+ short_names:
+ description: Resource short names
+ returned: success
+ type: str
+ singular_name:
+ description: Resource singular name
+ returned: success
+ type: str
+ available_api_version:
+ description: All available versions of the given API
+ returned: success
+ type: list
+ preferred_api_version:
+ description: Preferred version of the given API
+ returned: success
+ type: str
+'''
+
+
+import copy
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible_collections.community.kubernetes.plugins.module_utils.common import K8sAnsibleMixin, AUTH_ARG_SPEC
+
+try:
+ try:
+ from openshift import __version__ as version
+ # >=0.10
+ from openshift.dynamic.resource import ResourceList
+ except ImportError:
+ # <0.10
+ from openshift.dynamic.client import ResourceList
+ HAS_K8S_INSTANCE_HELPER = True
+ k8s_import_exception = None
+except ImportError:
+ HAS_K8S_INSTANCE_HELPER = False
+ k8s_import_exception = traceback.format_exc()
+
+
class KubernetesInfoModule(K8sAnsibleMixin):
    """Gather cluster-level facts: every API resource the server exposes,
    the connection settings in use, and client/server versions."""

    def __init__(self):
        """Build the AnsibleModule and verify the openshift client loaded."""
        module = AnsibleModule(
            argument_spec=self.argspec,
            supports_check_mode=True,
        )
        self.module = module
        self.params = self.module.params

        if not HAS_K8S_INSTANCE_HELPER:
            # BUG FIX: the original reason said "for merge_type", copy/pasted
            # from the k8s module; openshift is required here to enumerate
            # cluster resources.
            self.module.fail_json(msg=missing_required_lib("openshift >= 0.6.2", reason="to fetch cluster information"),
                                  exception=k8s_import_exception)

        super(KubernetesInfoModule, self).__init__()

    def execute_module(self):
        """Query the dynamic client for all resources and exit with facts."""
        self.client = self.get_api_client()
        # boolean() tolerates string truthiness from older Ansible versions.
        invalidate_cache = boolean(self.module.params.get('invalidate_cache', True), strict=False)
        if invalidate_cache:
            self.client.resources.invalidate_cache()
        results = {}
        for resource in list(self.client.resources):
            # Each entry appears to be a tuple whose first element is the
            # resource object — TODO confirm against the openshift client.
            resource = resource[0]
            if isinstance(resource, ResourceList):
                # ResourceList entries aggregate other kinds; skip them.
                continue
            results[resource.group] = {
                'api_version': resource.group_version,
                'categories': resource.categories if resource.categories else [],
                'kind': resource.kind,
                'name': resource.name,
                'namespaced': resource.namespaced,
                'preferred': resource.preferred,
                'short_names': resource.short_names if resource.short_names else [],
                'singular_name': resource.singular_name,
            }
        configuration = self.client.configuration
        connection = {
            'cert_file': configuration.cert_file,
            'host': configuration.host,
            'password': configuration.password,
            'proxy': configuration.proxy,
            'ssl_ca_cert': configuration.ssl_ca_cert,
            'username': configuration.username,
            'verify_ssl': configuration.verify_ssl,
        }
        version_info = {
            'client': version,
            'server': self.client.version,
        }
        self.module.exit_json(changed=False, apis=results, connection=connection, version=version_info)

    @property
    def argspec(self):
        """Shared auth options plus the invalidate_cache flag."""
        spec = copy.deepcopy(AUTH_ARG_SPEC)
        spec['invalidate_cache'] = dict(type='bool', default=True)
        return spec
+
+
def main():
    """Instantiate the info module and run it."""
    mod = KubernetesInfoModule()
    mod.execute_module()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_exec.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_exec.py
new file mode 100644
index 00000000..e540b9b6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_exec.py
@@ -0,0 +1,205 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020, Red Hat
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+
+module: k8s_exec
+
+short_description: Execute command in Pod
+
+version_added: "0.10.0"
+
+author: "Tristan de Cacqueray (@tristanC)"
+
+description:
+ - Use the Kubernetes Python client to execute command on K8s pods.
+
+extends_documentation_fragment:
+ - community.kubernetes.k8s_auth_options
+
+requirements:
+ - "python >= 2.7"
+ - "openshift == 0.4.3"
+ - "PyYAML >= 3.11"
+
+notes:
+- Return code C(return_code) for the command executed is added in output in version 1.0.0.
+options:
+ proxy:
+ description:
+ - The URL of an HTTP proxy to use for the connection.
+ - Can also be specified via I(K8S_AUTH_PROXY) environment variable.
+ - Please note that this module does not pick up typical proxy settings from the environment (e.g. HTTP_PROXY).
+ type: str
+ namespace:
+ description:
+ - The pod namespace name
+ type: str
+ required: yes
+ pod:
+ description:
+ - The pod name
+ type: str
+ required: yes
+ container:
+ description:
+ - The name of the container in the pod to connect to.
+ - Defaults to only container if there is only one container in the pod.
+ type: str
+ required: no
+ command:
+ description:
+ - The command to execute
+ type: str
+ required: yes
+'''
+
+EXAMPLES = r'''
+- name: Execute a command
+ community.kubernetes.k8s_exec:
+ namespace: myproject
+ pod: zuul-scheduler
+ command: zuul-scheduler full-reconfigure
+
+- name: Check RC status of command executed
+ community.kubernetes.k8s_exec:
+ namespace: myproject
+ pod: busybox-test
+ command: cmd_with_non_zero_exit_code
+ register: command_status
+ ignore_errors: True
+
+- name: Check last command status
+ debug:
+ msg: "cmd failed"
+ when: command_status.return_code != 0
+'''
+
+RETURN = r'''
+result:
+ description:
+ - The command object
+ returned: success
+ type: complex
+ contains:
+ stdout:
+ description: The command stdout
+ type: str
+ stdout_lines:
+ description: The command stdout
+ type: str
+ stderr:
+ description: The command stderr
+ type: str
+ stderr_lines:
+ description: The command stderr
+ type: str
+ return_code:
+ description: The command status code
+ type: int
+'''
+
+import copy
+import shlex
+
+try:
+ import yaml
+except ImportError:
+ # ImportError are managed by the common module already.
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.kubernetes.plugins.module_utils.common import (
+ K8sAnsibleMixin, AUTH_ARG_SPEC
+)
+
+try:
+ from kubernetes.client.apis import core_v1_api
+ from kubernetes.stream import stream
+except ImportError:
+ # ImportError are managed by the common module already.
+ pass
+
+
class KubernetesExecCommand(K8sAnsibleMixin):
    """Execute a command inside a pod container via the Kubernetes exec
    subresource (the API behind ``kubectl exec``)."""

    def __init__(self):
        module = AnsibleModule(
            argument_spec=self.argspec,
            supports_check_mode=True,
        )
        self.module = module
        self.params = self.module.params
        self.fail_json = self.module.fail_json
        super(KubernetesExecCommand, self).__init__()

    @property
    def argspec(self):
        """Shared auth options plus the exec-specific parameters."""
        spec = copy.deepcopy(AUTH_ARG_SPEC)
        spec['namespace'] = dict(type='str', required=True)
        spec['pod'] = dict(type='str', required=True)
        spec['container'] = dict(type='str')
        spec['command'] = dict(type='str', required=True)
        return spec

    @staticmethod
    def _rc_from_error_channel(err):
        """Map the status object read from the exec error channel to an
        exit code.

        A 'Success' status (or an empty/missing payload) maps to 0.  For
        failures the exit code is carried in a cause whose reason is
        'ExitCode'.  BUG FIX: the original did
        ``int(err['details']['causes'][0]['message'])`` which raised
        TypeError/KeyError/ValueError when the channel was empty or the
        first cause's message was not numeric (e.g. command not found);
        such cases now report rc 1 instead of crashing the module.
        """
        if not err or err.get('status') == 'Success':
            return 0
        for cause in (err.get('details') or {}).get('causes') or []:
            if cause.get('reason') == 'ExitCode':
                try:
                    return int(cause.get('message'))
                except (TypeError, ValueError):
                    break
        # Failure status without a parseable exit code: generic failure.
        return 1

    def execute_module(self):
        """Open the exec stream, pump stdout/stderr until the command
        finishes, then exit with the output and return code."""
        # Load kubernetes.client.Configuration as a side effect.
        self.get_api_client()
        api = core_v1_api.CoreV1Api()

        # hack because passing the container as None breaks things
        optional_kwargs = {}
        if self.params.get('container'):
            optional_kwargs['container'] = self.params['container']
        try:
            resp = stream(
                api.connect_get_namespaced_pod_exec,
                self.params["pod"],
                self.params["namespace"],
                command=shlex.split(self.params["command"]),
                stdout=True,
                stderr=True,
                stdin=False,
                tty=False,
                _preload_content=False, **optional_kwargs)
        except Exception as e:
            self.module.fail_json(msg="Failed to execute on pod %s"
                                      " due to : %s" % (self.params.get('pod'), to_native(e)))
        stdout, stderr = [], []
        while resp.is_open():
            resp.update(timeout=1)
            if resp.peek_stdout():
                stdout.append(resp.read_stdout())
            if resp.peek_stderr():
                stderr.append(resp.read_stderr())
        # Channel 3 is the error channel; it carries a JSON status object
        # (a YAML superset, hence safe_load) describing how the command
        # terminated.
        err = yaml.safe_load(resp.read_channel(3))
        rc = self._rc_from_error_channel(err)

        self.module.exit_json(
            # Some command might change environment, but ultimately failing at end
            changed=True,
            stdout="".join(stdout),
            stderr="".join(stderr),
            return_code=rc
        )
+
+
def main():
    """Instantiate the exec module and run it."""
    mod = KubernetesExecCommand()
    mod.execute_module()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_info.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_info.py
new file mode 100644
index 00000000..f7a7a0ca
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_info.py
@@ -0,0 +1,200 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Will Thames <@willthames>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: k8s_info
+
+short_description: Describe Kubernetes (K8s) objects
+
+author:
+ - "Will Thames (@willthames)"
+
+description:
+ - Use the OpenShift Python client to perform read operations on K8s objects.
+ - Access to the full range of K8s APIs.
+ - Authenticate using either a config file, certificates, password or token.
+ - Supports check mode.
+ - This module was called C(k8s_facts) before Ansible 2.9. The usage did not change.
+
+options:
+ kind:
+ description:
+ - Use to specify an object model.
+ - Use to create, delete, or discover an object without providing a full resource definition.
+ - Use in conjunction with I(api_version), I(name), and I(namespace) to identify a specific object.
+ - If I(resource definition) is provided, the I(kind) value from the I(resource_definition)
+ will override this option.
+ type: str
+ required: True
+ label_selectors:
+ description: List of label selectors to use to filter results
+ type: list
+ elements: str
+ field_selectors:
+ description: List of field selectors to use to filter results
+ type: list
+ elements: str
+
+extends_documentation_fragment:
+ - community.kubernetes.k8s_auth_options
+ - community.kubernetes.k8s_name_options
+ - community.kubernetes.k8s_wait_options
+
+requirements:
+ - "python >= 2.7"
+ - "openshift >= 0.6"
+ - "PyYAML >= 3.11"
+'''
+
+EXAMPLES = r'''
+- name: Get an existing Service object
+ community.kubernetes.k8s_info:
+ api_version: v1
+ kind: Service
+ name: web
+ namespace: testing
+ register: web_service
+
+- name: Get a list of all service objects
+ community.kubernetes.k8s_info:
+ api_version: v1
+ kind: Service
+ namespace: testing
+ register: service_list
+
+- name: Get a list of all pods from any namespace
+ community.kubernetes.k8s_info:
+ kind: Pod
+ register: pod_list
+
+- name: Search for all Pods labelled app=web
+ community.kubernetes.k8s_info:
+ kind: Pod
+ label_selectors:
+ - app = web
+ - tier in (dev, test)
+
+- name: Using vars while using label_selectors
+ community.kubernetes.k8s_info:
+ kind: Pod
+ label_selectors:
+ - "app = {{ app_label_web }}"
+ vars:
+ app_label_web: web
+
+- name: Search for all running pods
+ community.kubernetes.k8s_info:
+ kind: Pod
+ field_selectors:
+ - status.phase=Running
+
+- name: List custom objects created using CRD
+ community.kubernetes.k8s_info:
+ kind: MyCustomObject
+ api_version: "stable.example.com/v1"
+
+- name: Wait till the Object is created
+ community.kubernetes.k8s_info:
+ kind: Pod
+ wait: yes
+ name: pod-not-yet-created
+ namespace: default
+ wait_sleep: 10
+ wait_timeout: 360
+'''
+
+RETURN = r'''
+resources:
+ description:
+ - The object(s) that exists
+ returned: success
+ type: complex
+ contains:
+ api_version:
+ description: The versioned schema of this representation of an object.
+ returned: success
+ type: str
+ kind:
+ description: Represents the REST resource this object represents.
+ returned: success
+ type: str
+ metadata:
+ description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
+ returned: success
+ type: dict
+ spec:
+ description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
+ returned: success
+ type: dict
+ status:
+ description: Current status details for the object.
+ returned: success
+ type: dict
+'''
+
+import copy
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.kubernetes.plugins.module_utils.common import (
+ K8sAnsibleMixin, AUTH_ARG_SPEC, WAIT_ARG_SPEC)
+
+
class KubernetesInfoModule(K8sAnsibleMixin):
    """Read-only lookup of Kubernetes objects (the module behind
    ``k8s_info``), optionally waiting for a condition."""

    def __init__(self, *args, **kwargs):
        self.module = AnsibleModule(
            argument_spec=self.argspec,
            supports_check_mode=True,
        )
        self.params = self.module.params
        self.fail_json = self.module.fail_json
        self.exit_json = self.module.exit_json
        super(KubernetesInfoModule, self).__init__()

    def execute_module(self):
        """Connect, gather the matching objects, and exit with the facts."""
        self.client = self.get_api_client()

        facts = self.kubernetes_facts(
            self.params['kind'],
            self.params['api_version'],
            name=self.params['name'],
            namespace=self.params['namespace'],
            label_selectors=self.params['label_selectors'],
            field_selectors=self.params['field_selectors'],
            wait=self.params['wait'],
            wait_sleep=self.params['wait_sleep'],
            wait_timeout=self.params['wait_timeout'],
            condition=self.params['wait_condition'],
        )
        self.exit_json(changed=False, **facts)

    @property
    def argspec(self):
        """Auth and wait options plus the lookup-specific parameters."""
        spec = copy.deepcopy(AUTH_ARG_SPEC)
        spec.update(WAIT_ARG_SPEC)
        spec['kind'] = dict(required=True)
        spec['api_version'] = dict(default='v1', aliases=['api', 'version'])
        spec['name'] = dict()
        spec['namespace'] = dict()
        spec['label_selectors'] = dict(type='list', elements='str', default=[])
        spec['field_selectors'] = dict(type='list', elements='str', default=[])
        return spec
+
+
def main():
    """Instantiate the info module and run it."""
    mod = KubernetesInfoModule()
    mod.execute_module()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_log.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_log.py
new file mode 100644
index 00000000..e7b75711
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_log.py
@@ -0,0 +1,236 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019, Fabian von Feilitzsch <@fabianvf>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: k8s_log
+
+short_description: Fetch logs from Kubernetes resources
+
+version_added: "0.10.0"
+
+author:
+ - "Fabian von Feilitzsch (@fabianvf)"
+
+description:
+ - Use the OpenShift Python client to perform read operations on K8s log endpoints.
+ - Authenticate using either a config file, certificates, password or token.
+ - Supports check mode.
+  - Analogous to C(kubectl logs) or C(oc logs).
+extends_documentation_fragment:
+ - community.kubernetes.k8s_auth_options
+ - community.kubernetes.k8s_name_options
+options:
+ kind:
+ description:
+ - Use to specify an object model.
+ - Use in conjunction with I(api_version), I(name), and I(namespace) to identify a specific object.
+ - If using I(label_selectors), cannot be overridden.
+ type: str
+ default: Pod
+ name:
+ description:
+ - Use to specify an object name.
+ - Use in conjunction with I(api_version), I(kind) and I(namespace) to identify a specific object.
+ - Only one of I(name) or I(label_selectors) may be provided.
+ type: str
+ label_selectors:
+ description:
+ - List of label selectors to use to filter results
+ - Only one of I(name) or I(label_selectors) may be provided.
+ type: list
+ elements: str
+ container:
+ description:
+ - Use to specify the container within a pod to grab the log from.
+ - If there is only one container, this will default to that container.
+ - If there is more than one container, this option is required.
+ required: no
+ type: str
+
+requirements:
+ - "python >= 2.7"
+ - "openshift >= 0.6"
+ - "PyYAML >= 3.11"
+'''
+
+EXAMPLES = r'''
+- name: Get a log from a Pod
+ community.kubernetes.k8s_log:
+ name: example-1
+ namespace: testing
+ register: log
+
+# This will get the log from the first Pod found matching the selector
+- name: Log a Pod matching a label selector
+ community.kubernetes.k8s_log:
+ namespace: testing
+ label_selectors:
+ - app=example
+ register: log
+
+# This will get the log from a single Pod managed by this Deployment
+- name: Get a log from a Deployment
+ community.kubernetes.k8s_log:
+ api_version: apps/v1
+ kind: Deployment
+ namespace: testing
+ name: example
+ register: log
+
+# This will get the log from a single Pod managed by this DeploymentConfig
+- name: Get a log from a DeploymentConfig
+ community.kubernetes.k8s_log:
+ api_version: apps.openshift.io/v1
+ kind: DeploymentConfig
+ namespace: testing
+ name: example
+ register: log
+'''
+
+RETURN = r'''
+log:
+ type: str
+ description:
+ - The text log of the object
+ returned: success
+log_lines:
+ type: list
+ description:
+ - The log of the object, split on newlines
+ returned: success
+'''
+
+
+import copy
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import PY2
+
+from ansible_collections.community.kubernetes.plugins.module_utils.common import (
+ K8sAnsibleMixin, AUTH_ARG_SPEC, NAME_ARG_SPEC)
+
+
class KubernetesLogModule(K8sAnsibleMixin):
    """Fetch logs from a Pod, or from the first Pod selected by another
    resource's Pod selector (Deployment, DeploymentConfig, ...)."""

    def __init__(self):
        module = AnsibleModule(
            argument_spec=self.argspec,
            supports_check_mode=True,
        )
        self.module = module
        self.params = self.module.params
        self.fail_json = self.module.fail_json
        self.fail = self.module.fail_json
        self.exit_json = self.module.exit_json
        super(KubernetesLogModule, self).__init__()

    @property
    def argspec(self):
        """Auth and name options plus the log-specific parameters."""
        args = copy.deepcopy(AUTH_ARG_SPEC)
        args.update(NAME_ARG_SPEC)
        args.update(
            dict(
                kind=dict(type='str', default='Pod'),
                container=dict(),
                label_selectors=dict(type='list', elements='str', default=[]),
            )
        )
        return args

    def execute_module(self):
        """Resolve the target Pod, then fetch and return its log."""
        name = self.params.get('name')
        namespace = self.params.get('namespace')
        # Minor fix: the fallback default was {}; the option is a list
        # (behavior identical since ','.join of either default is '').
        label_selector = ','.join(self.params.get('label_selectors', []))
        if name and label_selector:
            self.fail(msg='Only one of name or label_selectors can be provided')

        self.client = self.get_api_client()
        resource = self.find_resource(self.params['kind'], self.params['api_version'], fail=True)
        v1_pods = self.find_resource('Pod', 'v1', fail=True)

        if 'log' not in resource.subresources:
            # Not directly loggable: derive a Pod selector from the object
            # and log one of the Pods it manages instead.
            if not name:
                self.fail(msg='name must be provided for resources that do not support the log subresource')
            instance = resource.get(name=name, namespace=namespace)
            label_selector = ','.join(self.extract_selectors(instance))
            resource = v1_pods

        if label_selector:
            instances = v1_pods.get(namespace=namespace, label_selector=label_selector)
            if not instances.items:
                self.fail(msg='No pods in namespace {0} matched selector {1}'.format(namespace, label_selector))
            # This matches the behavior of kubectl when logging pods via a selector
            name = instances.items[0].metadata.name
            resource = v1_pods

        kwargs = {}
        if self.params.get('container'):
            kwargs['query_params'] = dict(container=self.params['container'])

        log = serialize_log(resource.log.get(
            name=name,
            namespace=namespace,
            serialize=False,
            **kwargs
        ))

        self.exit_json(changed=False, log=log, log_lines=log.split('\n'))

    def extract_selectors(self, instance):
        """Build kubectl-style label selector strings from *instance*'s
        ``spec.selector``.

        Parses selectors on an object based on the specifications documented here:
        https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
        """
        selectors = []
        if not instance.spec.selector:
            # BUG FIX: the original called '/'.join(a, b); str.join takes a
            # single iterable, so it raised TypeError instead of producing
            # this error message.
            self.fail(msg='{0} {1} does not support the log subresource directly, and no Pod selector was found on the object'.format(
                '/'.join([instance.group, instance.apiVersion]), instance.kind))

        if not (instance.spec.selector.matchLabels or instance.spec.selector.matchExpressions):
            # A few resources (like DeploymentConfigs) just use a simple key:value style instead of supporting expressions
            for k, v in dict(instance.spec.selector).items():
                selectors.append('{0}={1}'.format(k, v))
            return selectors

        if instance.spec.selector.matchLabels:
            for k, v in dict(instance.spec.selector.matchLabels).items():
                selectors.append('{0}={1}'.format(k, v))

        if instance.spec.selector.matchExpressions:
            for expression in instance.spec.selector.matchExpressions:
                operator = expression.operator

                if operator == 'Exists':
                    selectors.append(expression.key)
                elif operator == 'DoesNotExist':
                    selectors.append('!{0}'.format(expression.key))
                elif operator in ['In', 'NotIn']:
                    selectors.append('{key} {operator} {values}'.format(
                        key=expression.key,
                        operator=operator.lower(),
                        values='({0})'.format(', '.join(expression.values))
                    ))
                else:
                    self.fail(msg='The k8s_log module does not support the {0} matchExpression operator'.format(operator.lower()))

        return selectors
+
+
def serialize_log(response):
    """Return the HTTP response body as text (raw bytes on Python 2,
    where str is already a byte string)."""
    raw = response.data
    if PY2:
        return raw
    return raw.decode('utf8')
+
+
def main():
    """Instantiate the log module and run it."""
    mod = KubernetesLogModule()
    mod.execute_module()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_rollback.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_rollback.py
new file mode 100644
index 00000000..7ccd4153
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_rollback.py
@@ -0,0 +1,224 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Julien Huon <@julienhuon> Institut National de l'Audiovisuel
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
# Parsed by ansible-doc / validate-modules; the connection and
# object-identification options come from the listed documentation fragments.
DOCUMENTATION = r'''
module: k8s_rollback
short_description: Rollback Kubernetes (K8S) Deployments and DaemonSets
version_added: "1.0.0"
author:
  - "Julien Huon (@julienhuon)"
description:
  - Use the OpenShift Python client to perform the Rollback.
  - Authenticate using either a config file, certificates, password or token.
  - Similar to the C(kubectl rollout undo) command.
options:
  label_selectors:
    description: List of label selectors to use to filter results.
    type: list
    elements: str
  field_selectors:
    description: List of field selectors to use to filter results.
    type: list
    elements: str
extends_documentation_fragment:
  - community.kubernetes.k8s_auth_options
  - community.kubernetes.k8s_name_options
requirements:
  - "python >= 2.7"
  - "openshift >= 0.6"
  - "PyYAML >= 3.11"
'''

# Usage examples rendered by ansible-doc.
EXAMPLES = r'''
- name: Rollback a failed deployment
  community.kubernetes.k8s_rollback:
    api_version: apps/v1
    kind: Deployment
    name: web
    namespace: testing
'''

# Return-value documentation rendered by ansible-doc.
RETURN = r'''
rollback_info:
  description:
  - The object that was rolled back.
  returned: success
  type: complex
  contains:
    api_version:
      description: The versioned schema of this representation of an object.
      returned: success
      type: str
    code:
      description: The HTTP Code of the response
      returned: success
      type: str
    kind:
      description: Status
      returned: success
      type: str
    metadata:
      description:
      - Standard object metadata.
      - Includes name, namespace, annotations, labels, etc.
      returned: success
      type: dict
    status:
      description: Current status details for the object.
      returned: success
      type: dict
'''
+
+import copy
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.kubernetes.plugins.module_utils.common import (
+ K8sAnsibleMixin, AUTH_ARG_SPEC, NAME_ARG_SPEC)
+
+
class KubernetesRollbackModule(K8sAnsibleMixin):
    """Roll Deployments and DaemonSets back to their previous revision,
    mirroring ``kubectl rollout undo``.

    Deployments keep their revision history in ReplicaSets and DaemonSets
    in ControllerRevisions; the module locates the revision preceding the
    current one and PATCHes it back onto the owning object.
    """

    def __init__(self):
        module = AnsibleModule(
            argument_spec=self.argspec,
            supports_check_mode=True,
        )
        # Expose the AnsibleModule plumbing under the names the mixin expects.
        self.module = module
        self.params = self.module.params
        self.fail_json = self.module.fail_json
        self.fail = self.module.fail_json
        self.exit_json = self.module.exit_json
        super(KubernetesRollbackModule, self).__init__()

        self.kind = self.params['kind']
        self.api_version = self.params['api_version']
        self.name = self.params['name']
        self.namespace = self.params['namespace']
        # Kind/api_version of the objects that hold the revision history
        # for self.kind (see class docstring).
        self.managed_resource = {}

        if self.kind == "DaemonSet":
            self.managed_resource['kind'] = "ControllerRevision"
            self.managed_resource['api_version'] = "apps/v1"
        elif self.kind == "Deployment":
            self.managed_resource['kind'] = "ReplicaSet"
            self.managed_resource['api_version'] = "apps/v1"
        else:
            self.fail(msg="Cannot perform rollback on resource of kind {0}".format(self.kind))

    def execute_module(self):
        """Look up every matching resource and roll each one back.

        Exits via ``exit_json`` with ``rollback_info`` holding one entry
        per patched resource.
        """
        results = []
        self.client = self.get_api_client()

        resources = self.kubernetes_facts(self.kind,
                                          self.api_version,
                                          self.name,
                                          self.namespace,
                                          self.params['label_selectors'],
                                          self.params['field_selectors'])

        for resource in resources['resources']:
            result = self.perform_action(resource)
            results.append(result)

        # NOTE(review): 'changed' is reported True even when no resource
        # matched the filters -- confirm intended.
        self.exit_json(**{
            'changed': True,
            'rollback_info': results
        })

    def perform_action(self, resource):
        """Patch *resource* back to its previous revision.

        Returns a result dict describing the request that was sent
        (method, patch body, and the API server's response).
        """
        if self.kind == "DaemonSet":
            current_revision = resource['metadata']['generation']
        elif self.kind == "Deployment":
            current_revision = resource['metadata']['annotations']['deployment.kubernetes.io/revision']

        # History objects are matched via the owner's label selector.
        managed_resources = self.kubernetes_facts(self.managed_resource['kind'],
                                                  self.managed_resource['api_version'],
                                                  '',
                                                  self.namespace,
                                                  resource['spec']
                                                  ['selector']
                                                  ['matchLabels'],
                                                  '')

        prev_managed_resource = get_previous_revision(managed_resources['resources'],
                                                      current_revision)
        if not prev_managed_resource:
            # Fail with a clear message instead of a TypeError when there is
            # no revision to roll back to (first revision, or purged history).
            self.fail(msg="No previous revision of {0} {1} found to roll back to".format(
                self.kind, self.name))

        if self.kind == "Deployment":
            # The pod-template-hash label belongs to the ReplicaSet, not the
            # Deployment template; drop it (tolerating its absence) before
            # patching the template back onto the Deployment.
            prev_managed_resource['spec']['template']['metadata']['labels'].pop('pod-template-hash', None)

            resource_patch = [{
                "op": "replace",
                "path": "/spec/template",
                "value": prev_managed_resource['spec']['template']
            }, {
                "op": "replace",
                "path": "/metadata/annotations",
                "value": {
                    "deployment.kubernetes.io/revision": prev_managed_resource['metadata']['annotations']['deployment.kubernetes.io/revision']
                }
            }]

            api_target = 'deployments'
            content_type = 'application/json-patch+json'
        elif self.kind == "DaemonSet":
            # ControllerRevisions store the full object snapshot in 'data'.
            resource_patch = prev_managed_resource["data"]

            api_target = 'daemonsets'
            content_type = 'application/strategic-merge-patch+json'

        # NOTE(review): supports_check_mode=True is declared but the PATCH
        # below is still sent in check mode -- confirm intended.
        rollback = self.client.request("PATCH",
                                       "/apis/{0}/namespaces/{1}/{2}/{3}"
                                       .format(self.api_version,
                                               self.namespace,
                                               api_target,
                                               self.name),
                                       body=resource_patch,
                                       content_type=content_type)

        result = {'changed': True}
        result['method'] = 'patch'
        result['body'] = resource_patch
        result['resources'] = rollback.to_dict()
        return result

    @property
    def argspec(self):
        """Module argument spec: shared auth + name options plus the
        label/field selector filters."""
        args = copy.deepcopy(AUTH_ARG_SPEC)
        args.update(NAME_ARG_SPEC)
        args.update(
            dict(
                label_selectors=dict(type='list', elements='str', default=[]),
                field_selectors=dict(type='list', elements='str', default=[]),
            )
        )
        return args
+
+
def get_previous_revision(all_resources, current_revision):
    """Return the history object whose revision is one less than
    *current_revision*, or None when no such object exists.

    ReplicaSets are matched via the 'deployment.kubernetes.io/revision'
    annotation, ControllerRevisions via
    'deprecated.daemonset.template.generation'; other kinds are ignored.
    """
    target_revision = int(current_revision) - 1
    for candidate in all_resources:
        kind = candidate['kind']
        if kind == 'ReplicaSet':
            annotation = 'deployment.kubernetes.io/revision'
        elif kind == 'ControllerRevision':
            annotation = 'deprecated.daemonset.template.generation'
        else:
            continue
        if int(candidate['metadata']['annotations'][annotation]) == target_revision:
            return candidate
    return None
+
+
def main():
    """Entry point when invoked as an Ansible module."""
    rollback_module = KubernetesRollbackModule()
    rollback_module.execute_module()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_scale.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_scale.py
new file mode 100644
index 00000000..9e63366a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_scale.py
@@ -0,0 +1,129 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Chris Houseknecht <@chouseknecht>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+
# Parsed by ansible-doc / validate-modules; all of this module's options come
# from the listed documentation fragments.
DOCUMENTATION = r'''

module: k8s_scale

short_description: Set a new size for a Deployment, ReplicaSet, Replication Controller, or Job

author:
    - "Chris Houseknecht (@chouseknecht)"
    - "Fabian von Feilitzsch (@fabianvf)"

description:
  - Similar to the kubectl scale command. Use to set the number of replicas for a Deployment, ReplicaSet,
    or Replication Controller, or the parallelism attribute of a Job. Supports check mode.

extends_documentation_fragment:
  - community.kubernetes.k8s_name_options
  - community.kubernetes.k8s_auth_options
  - community.kubernetes.k8s_resource_options
  - community.kubernetes.k8s_scale_options

requirements:
  - "python >= 2.7"
  - "openshift >= 0.6"
  - "PyYAML >= 3.11"
'''

# Deployment objects live in the apps/v1 API group (not core v1), and kind
# names are canonically capitalized ('Job'); the examples below use the
# values that actually work when copied into a playbook.
EXAMPLES = r'''
- name: Scale deployment up, and extend timeout
  community.kubernetes.k8s_scale:
    api_version: apps/v1
    kind: Deployment
    name: elastic
    namespace: myproject
    replicas: 3
    wait_timeout: 60

- name: Scale deployment down when current replicas match
  community.kubernetes.k8s_scale:
    api_version: apps/v1
    kind: Deployment
    name: elastic
    namespace: myproject
    current_replicas: 3
    replicas: 2

- name: Increase job parallelism
  community.kubernetes.k8s_scale:
    api_version: batch/v1
    kind: Job
    name: pi-with-timeout
    namespace: testing
    replicas: 2

# Match object using local file or inline definition

- name: Scale deployment based on a file from the local filesystem
  community.kubernetes.k8s_scale:
    src: /myproject/elastic_deployment.yml
    replicas: 3
    wait: no

- name: Scale deployment based on a template output
  community.kubernetes.k8s_scale:
    resource_definition: "{{ lookup('template', '/myproject/elastic_deployment.yml') | from_yaml }}"
    replicas: 3
    wait: no

- name: Scale deployment based on a file from the Ansible controller filesystem
  community.kubernetes.k8s_scale:
    resource_definition: "{{ lookup('file', '/myproject/elastic_deployment.yml') | from_yaml }}"
    replicas: 3
    wait: no
'''

# Return-value documentation rendered by ansible-doc.
RETURN = r'''
result:
  description:
  - If a change was made, will return the patched object, otherwise returns the existing object.
  returned: success
  type: complex
  contains:
    api_version:
      description: The versioned schema of this representation of an object.
      returned: success
      type: str
    kind:
      description: Represents the REST resource this object represents.
      returned: success
      type: str
    metadata:
      description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
      returned: success
      type: complex
    spec:
      description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
      returned: success
      type: complex
    status:
      description: Current status details for the object.
      returned: success
      type: complex
    duration:
      description: elapsed time of task in seconds
      returned: when C(wait) is true
      type: int
      sample: 48
'''
+
+from ansible_collections.community.kubernetes.plugins.module_utils.scale import KubernetesAnsibleScaleModule
+
+
def main():
    """Entry point: delegate to the shared scale implementation."""
    scale_module = KubernetesAnsibleScaleModule()
    scale_module.execute_module()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_service.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_service.py
new file mode 100644
index 00000000..0485d710
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_service.py
@@ -0,0 +1,272 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, KubeVirt Team <@kubevirt>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
# Parsed by ansible-doc / validate-modules; connection, resource-definition
# and state options come from the listed documentation fragments.
DOCUMENTATION = r'''

module: k8s_service

short_description: Manage Services on Kubernetes

author: KubeVirt Team (@kubevirt)

description:
  - Use Openshift Python SDK to manage Services on Kubernetes

extends_documentation_fragment:
  - community.kubernetes.k8s_auth_options
  - community.kubernetes.k8s_resource_options
  - community.kubernetes.k8s_state_options

options:
  merge_type:
    description:
      - Whether to override the default patch merge approach with a specific type. By default, the strategic
        merge will typically be used.
      - For example, Custom Resource Definitions typically aren't updatable by the usual strategic merge. You may
        want to use C(merge) if you see "strategic merge patch format is not supported"
      - See U(https://kubernetes.io/docs/tasks/run-application/update-api-object-kubectl-patch/#use-a-json-merge-patch-to-update-a-deployment)
      - Requires openshift >= 0.6.2
      - If more than one merge_type is given, the merge_types will be tried in order
      - If openshift >= 0.6.2, this defaults to C(['strategic-merge', 'merge']), which is ideal for using the same parameters
        on resource kinds that combine Custom Resources and built-in resources. For openshift < 0.6.2, the default
        is simply C(strategic-merge).
    choices:
      - json
      - merge
      - strategic-merge
    type: list
    elements: str
  name:
    description:
      - Use to specify a Service object name.
    required: true
    type: str
  namespace:
    description:
      - Use to specify a Service object namespace.
    required: true
    type: str
  type:
    description:
      - Specifies the type of Service to create.
      - See U(https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types)
    choices:
      - NodePort
      - ClusterIP
      - LoadBalancer
      - ExternalName
    type: str
  ports:
    description:
      - A list of ports to expose.
      - U(https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services)
    type: list
    elements: dict
  selector:
    description:
      - Label selectors identify objects this Service should apply to.
      - U(https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
    type: dict
  apply:
    description:
      - C(apply) compares the desired resource definition with the previously supplied resource definition,
        ignoring properties that are automatically generated
      - C(apply) works better with Services than 'force=yes'
      - mutually exclusive with C(merge_type)
    type: bool

requirements:
  - python >= 2.7
  - openshift >= 0.6.2
'''

# Usage examples rendered by ansible-doc ('inline' is an alias of
# 'resource_definition' provided by the shared resource options).
EXAMPLES = r'''
- name: Expose https port with ClusterIP
  community.kubernetes.k8s_service:
    state: present
    name: test-https
    namespace: default
    ports:
      - port: 443
        protocol: TCP
    selector:
      key: special

- name: Expose https port with ClusterIP using spec
  community.kubernetes.k8s_service:
    state: present
    name: test-https
    namespace: default
    inline:
      spec:
        ports:
          - port: 443
            protocol: TCP
        selector:
          key: special
'''

# Return-value documentation rendered by ansible-doc.
RETURN = r'''
result:
  description:
  - The created, patched, or otherwise present Service object. Will be empty in the case of a deletion.
  returned: success
  type: complex
  contains:
    api_version:
      description: The versioned schema of this representation of an object.
      returned: success
      type: str
    kind:
      description: Always 'Service'.
      returned: success
      type: str
    metadata:
      description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
      returned: success
      type: complex
    spec:
      description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
      returned: success
      type: complex
    status:
      description: Current status details for the object.
      returned: success
      type: complex
'''
+
+import copy
+import traceback
+
+from collections import defaultdict
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.kubernetes.plugins.module_utils.common import (
+ K8sAnsibleMixin, AUTH_ARG_SPEC, COMMON_ARG_SPEC, RESOURCE_ARG_SPEC)
+
+
# Options specific to k8s_service; merged with the shared AUTH/COMMON/RESOURCE
# argument specs in KubernetesService.argspec. Keys and shapes mirror the
# DOCUMENTATION block above.
SERVICE_ARG_SPEC = {
    'apply': {
        'type': 'bool',
        'default': False,
    },
    'name': {'required': True},
    'namespace': {'required': True},
    'merge_type': {'type': 'list', 'elements': 'str', 'choices': ['json', 'merge', 'strategic-merge']},
    'selector': {'type': 'dict'},
    'type': {
        'type': 'str',
        'choices': [
            'NodePort', 'ClusterIP', 'LoadBalancer', 'ExternalName'
        ],
    },
    'ports': {'type': 'list', 'elements': 'dict'},
}
+
+
class KubernetesService(K8sAnsibleMixin):
    """Ansible module implementation that builds a Service definition from
    module parameters (merged over an optional ``resource_definition``) and
    hands it to the shared create/patch/delete machinery in the mixin."""

    def __init__(self, *args, **kwargs):
        mutually_exclusive = [
            ('resource_definition', 'src'),
            ('merge_type', 'apply'),
        ]

        module = AnsibleModule(
            argument_spec=self.argspec,
            mutually_exclusive=mutually_exclusive,
            supports_check_mode=True,
        )

        # Expose the AnsibleModule plumbing under the names the mixin expects.
        self.module = module
        self.check_mode = self.module.check_mode
        self.params = self.module.params
        self.fail_json = self.module.fail_json
        self.fail = self.module.fail_json
        self.exit_json = self.module.exit_json

        super(KubernetesService, self).__init__(*args, **kwargs)

        self.client = None
        self.warnings = []

        self.kind = self.params.get('kind')
        self.api_version = self.params.get('api_version')
        self.name = self.params.get('name')
        self.namespace = self.params.get('namespace')

        self.check_library_version()
        self.set_resource_definitions()

    @staticmethod
    def merge_dicts(x, y):
        """Recursively merge mapping *y* over mapping *x*, yielding
        (key, value) pairs; on a key conflict *y* wins unless both values
        are dicts, in which case they are merged recursively."""
        for k in set(x.keys()).union(y.keys()):
            if k in x and k in y:
                if isinstance(x[k], dict) and isinstance(y[k], dict):
                    yield (k, dict(KubernetesService.merge_dicts(x[k], y[k])))
                else:
                    yield (k, y[k])
            elif k in x:
                yield (k, x[k])
            else:
                yield (k, y[k])

    @property
    def argspec(self):
        """ argspec property builder """
        argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
        argument_spec.update(COMMON_ARG_SPEC)
        argument_spec.update(RESOURCE_ARG_SPEC)
        argument_spec.update(SERVICE_ARG_SPEC)
        return argument_spec

    def execute_module(self):
        """ Module execution """
        self.client = self.get_api_client()

        api_version = 'v1'
        selector = self.params.get('selector')
        service_type = self.params.get('type')
        ports = self.params.get('ports')

        # defaultdict(defaultdict) lets 'spec'/'metadata' sub-dicts be
        # filled in without creating the intermediate dicts explicitly.
        definition = defaultdict(defaultdict)

        definition['kind'] = 'Service'
        definition['apiVersion'] = api_version

        def_spec = definition['spec']
        def_spec['type'] = service_type
        def_spec['ports'] = ports
        def_spec['selector'] = selector

        def_meta = definition['metadata']
        def_meta['name'] = self.params.get('name')
        def_meta['namespace'] = self.params.get('namespace')

        # 'resource_definition:' has lower priority than module parameters
        # NOTE(review): merge_dicts lets the parameter side win even when a
        # parameter was not supplied (None), so an unset 'type'/'ports'/
        # 'selector' can overwrite a value coming from resource_definition
        # with None -- confirm this is intended.
        definition = dict(self.merge_dicts(self.resource_definitions[0], definition))

        resource = self.find_resource('Service', api_version, fail=True)
        definition = self.set_defaults(resource, definition)
        result = self.perform_action(resource, definition)

        self.exit_json(**result)
+
+
def main():
    """Module entry point: run the Service module and convert any
    unhandled exception into a module failure."""
    service_module = KubernetesService()
    try:
        service_module.execute_module()
    except Exception as exc:
        service_module.fail_json(msg=str(exc), exception=traceback.format_exc())


if __name__ == '__main__':
    main()