diff options
author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-14 20:03:01 +0000 |
---|---|---|
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-14 20:03:01 +0000 |
commit | a453ac31f3428614cceb99027f8efbdb9258a40b (patch) | |
tree | f61f87408f32a8511cbd91799f9cececb53e0374 /collections-debian-merged/ansible_collections/community/kubernetes | |
parent | Initial commit. (diff) | |
download | ansible-upstream.tar.xz ansible-upstream.zip |
Adding upstream version 2.10.7+merged+base+2.10.8+dfsg.upstream/2.10.7+merged+base+2.10.8+dfsgupstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'collections-debian-merged/ansible_collections/community/kubernetes')
197 files changed, 17006 insertions, 0 deletions
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/.github/stale.yml b/collections-debian-merged/ansible_collections/community/kubernetes/.github/stale.yml new file mode 100644 index 00000000..230cf78a --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/.github/stale.yml @@ -0,0 +1,60 @@ +--- +# Configuration for probot-stale - https://github.com/probot/stale + +# Number of days of inactivity before an Issue or Pull Request becomes stale +daysUntilStale: 90 + +# Number of days of inactivity before an Issue or Pull Request with the stale +# label is closed. Set to false to disable. If disabled, issues still need to be +# closed manually, but will remain marked as stale. +daysUntilClose: 30 + +# Only issues or pull requests with all of these labels are check if stale. +# Defaults to `[]` (disabled) +onlyLabels: [] + +# Issues or Pull Requests with these labels will never be considered stale. Set +# to `[]` to disable +exemptLabels: + - security + - planned + - priority/critical + - lifecycle/frozen + - verified + +# Set to true to ignore issues in a project (defaults to false) +exemptProjects: false + +# Set to true to ignore issues in a milestone (defaults to false) +exemptMilestones: true + +# Set to true to ignore issues with an assignee (defaults to false) +exemptAssignees: false + +# Label to use when marking as stale +staleLabel: lifecycle/stale + +# Limit the number of actions per hour, from 1-30. Default is 30 +limitPerRun: 30 + +pulls: + markComment: |- + PRs go stale after 90 days of inactivity. + If there is no further activity, the PR will be closed in another 30 days. + + unmarkComment: >- + This pull request is no longer stale. + + closeComment: >- + This pull request has been closed due to inactivity. + +issues: + markComment: |- + Issues go stale after 90 days of inactivity. + If there is no further activity, the issue will be closed in another 30 days. 
+ + unmarkComment: >- + This issue is no longer stale. + + closeComment: >- + This issue has been closed due to inactivity. diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/.github/workflows/ci.yml b/collections-debian-merged/ansible_collections/community/kubernetes/.github/workflows/ci.yml new file mode 100644 index 00000000..a6fa7928 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/.github/workflows/ci.yml @@ -0,0 +1,189 @@ +--- +name: CI +'on': + push: + branches: + - main + pull_request: + schedule: + - cron: '0 6 * * *' + +jobs: + + sanity: + runs-on: ubuntu-latest + strategy: + matrix: + python_version: ['2.7', '3.7'] + steps: + - name: Check out code + uses: actions/checkout@v2 + with: + path: ansible_collections/community/kubernetes + + - name: Set up Python ${{ matrix.python_version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python_version }} + + - name: Install ansible base (devel branch) + run: pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check + + - name: Run sanity tests on Python ${{ matrix.python_version }} + run: make test-sanity PYTHON_VERSION=${{ matrix.python_version }} + working-directory: ./ansible_collections/community/kubernetes + + integration: + runs-on: ubuntu-latest + strategy: + matrix: + # Our old integration tests fail under newer Python versions. 
+ python_version: ['3.6'] + steps: + - name: Check out code + uses: actions/checkout@v2 + with: + path: ansible_collections/community/kubernetes + + - name: Set up Python ${{ matrix.python_version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python_version }} + + - name: Install ansible base (devel branch) + run: pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check + + - name: Run integration tests on Python ${{ matrix.python_version }} + run: make test-integration PYTHON_VERSION=${{ matrix.python_version }} + working-directory: ./ansible_collections/community/kubernetes + + - name: Generate coverage report. + run: ansible-test coverage xml -v --requirements --group-by command --group-by version + working-directory: ./ansible_collections/community/kubernetes + + - uses: codecov/codecov-action@v1 + with: + fail_ci_if_error: false + + molecule: + runs-on: ubuntu-latest + strategy: + matrix: + python_version: ['3.7'] + steps: + - name: Check out code + uses: actions/checkout@v2 + with: + path: ansible_collections/community/kubernetes + + - name: Set up KinD cluster + uses: engineerd/setup-kind@v0.4.0 + + - name: Set up Python ${{ matrix.python_version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python_version }} + + - name: Install molecule and openshift dependencies + run: pip install ansible molecule yamllint openshift flake8 + + # The latest release doesn't work with Molecule currently. + # See: https://github.com/ansible-community/molecule/issues/2757 + # - name: Install ansible base, latest release. + # run: | + # pip uninstall -y ansible + # pip install --pre ansible-base + + # The devel branch doesn't work with Molecule currently. 
+ # See: https://github.com/ansible-community/molecule/issues/2757 + # - name: Install ansible base (devel branch) + # run: | + # pip uninstall -y ansible + # pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check + + - name: Create default collection path symlink + run: | + mkdir -p /home/runner/.ansible + ln -s /home/runner/work/kubernetes/kubernetes /home/runner/.ansible/collections + + - name: Run molecule default test scenario + run: make test-molecule + working-directory: ./ansible_collections/community/kubernetes + + downstream-sanity-29: + runs-on: ubuntu-latest + strategy: + matrix: + python_version: ['3.7'] + steps: + - name: Check out code + uses: actions/checkout@v2 + with: + path: ansible_collections/community/kubernetes + + - name: Set up Python ${{ matrix.python_version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python_version }} + + - name: Install ansible base (devel branch) + run: pip install "ansible>=2.9.0,<2.10.0" + + - name: Run sanity tests on Python ${{ matrix.python_version }} + run: make downstream-test-sanity + working-directory: ./ansible_collections/community/kubernetes + + downstream-integration-29: + runs-on: ubuntu-latest + strategy: + matrix: + # Our old integration tests fail under newer Python versions. 
+ python_version: ['3.6'] + steps: + - name: Check out code + uses: actions/checkout@v2 + with: + path: ansible_collections/community/kubernetes + + - name: Set up Python ${{ matrix.python_version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python_version }} + + - name: Install ansible base (devel branch) + run: pip install "ansible>=2.9.0,<2.10.0" + + - name: Run integration tests on Python ${{ matrix.python_version }} + run: make downstream-test-integration + working-directory: ./ansible_collections/community/kubernetes + + downstream-molecule-29: + runs-on: ubuntu-latest + strategy: + matrix: + python_version: ['3.7'] + steps: + - name: Check out code + uses: actions/checkout@v2 + with: + path: ansible_collections/community/kubernetes + + - name: Set up KinD cluster + uses: engineerd/setup-kind@v0.4.0 + + - name: Set up Python ${{ matrix.python_version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python_version }} + + - name: Install molecule and openshift dependencies + run: pip install "ansible>=2.9.0,<2.10.0" molecule yamllint openshift flake8 + + - name: Create default collection path symlink + run: | + mkdir -p /home/runner/.ansible + ln -s /home/runner/work/kubernetes/kubernetes /home/runner/.ansible/collections + + - name: Run molecule default test scenario + run: make downstream-test-molecule + working-directory: ./ansible_collections/community/kubernetes diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/.gitignore b/collections-debian-merged/ansible_collections/community/kubernetes/.gitignore new file mode 100644 index 00000000..f97b7875 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/.gitignore @@ -0,0 +1,14 @@ +*.retry +.idea +*.log +__pycache__/ + +# Galaxy artifacts. +*.tar.gz + +# Changelog cache files. +changelogs/.plugin-cache.yaml + +# Temporary test files. 
+tests/output +tests/integration/cloud-config-* diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/.yamllint b/collections-debian-merged/ansible_collections/community/kubernetes/.yamllint new file mode 100644 index 00000000..45b68407 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/.yamllint @@ -0,0 +1,16 @@ +--- +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + document-start: disable + line-length: disable + truthy: disable + indentation: + spaces: 2 + indent-sequences: consistent diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/CHANGELOG.rst b/collections-debian-merged/ansible_collections/community/kubernetes/CHANGELOG.rst new file mode 100644 index 00000000..8712e5ca --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/CHANGELOG.rst @@ -0,0 +1,215 @@ +=================================== +Kubernetes Collection Release Notes +=================================== + +.. contents:: Topics + + +v1.1.1 +====== + +Bugfixes +-------- + +- k8s - Fix sanity test 'compile' failing because of positional args (https://github.com/ansible-collections/community.kubernetes/issues/260). + +v1.1.0 +====== + +Major Changes +------------- + +- k8s - Add support for template parameter (https://github.com/ansible-collections/community.kubernetes/pull/230). +- k8s_* - Add support for vaulted kubeconfig and src (https://github.com/ansible-collections/community.kubernetes/pull/193). + +Minor Changes +------------- + +- Add Makefile and downstream build script for kubernetes.core (https://github.com/ansible-collections/community.kubernetes/pull/197). +- Add execution environment metadata (https://github.com/ansible-collections/community.kubernetes/pull/211). 
+- Add probot stale bot configuration to autoclose issues (https://github.com/ansible-collections/community.kubernetes/pull/196). +- Added a contribution guide (https://github.com/ansible-collections/community.kubernetes/pull/192). +- Refactor module_utils (https://github.com/ansible-collections/community.kubernetes/pull/223). +- Replace KubernetesAnsibleModule class with dummy class (https://github.com/ansible-collections/community.kubernetes/pull/227). +- Replace KubernetesRawModule class with K8sAnsibleMixin (https://github.com/ansible-collections/community.kubernetes/pull/231). +- common - Do not mark task as changed when diff is irrelevant (https://github.com/ansible-collections/community.kubernetes/pull/228). +- helm - Add appVersion idempotence check to Helm (https://github.com/ansible-collections/community.kubernetes/pull/246). +- helm - Return status in check mode (https://github.com/ansible-collections/community.kubernetes/pull/192). +- helm - Support for single or multiple values files (https://github.com/ansible-collections/community.kubernetes/pull/93). +- helm_* - Support vaulted kubeconfig (https://github.com/ansible-collections/community.kubernetes/pull/229). +- k8s - SelfSubjectAccessReviews supported when 405 response received (https://github.com/ansible-collections/community.kubernetes/pull/237). +- k8s - add testcase for adding multiple resources using template parameter (https://github.com/ansible-collections/community.kubernetes/issues/243). +- k8s_info - Add support for wait (https://github.com/ansible-collections/community.kubernetes/pull/235). +- k8s_info - update custom resource example (https://github.com/ansible-collections/community.kubernetes/issues/202). +- kubectl plugin - correct console log (https://github.com/ansible-collections/community.kubernetes/issues/200). +- raw - Handle exception raised by underlying APIs (https://github.com/ansible-collections/community.kubernetes/pull/180). 
+ +Bugfixes +-------- + +- common - handle exception raised due to DynamicClient (https://github.com/ansible-collections/community.kubernetes/pull/224). +- helm - add replace parameter (https://github.com/ansible-collections/community.kubernetes/issues/106). +- k8s (inventory) - Set the connection plugin and transport separately (https://github.com/ansible-collections/community.kubernetes/pull/208). +- k8s (inventory) - Specify FQCN for k8s inventory plugin to fix use with Ansible 2.9 (https://github.com/ansible-collections/community.kubernetes/pull/250). +- k8s_info - add wait functionality (https://github.com/ansible-collections/community.kubernetes/issues/18). + +v1.0.0 +====== + +Major Changes +------------- + +- helm_plugin - new module to manage Helm plugins (https://github.com/ansible-collections/community.kubernetes/pull/154). +- helm_plugin_info - new modules to gather information about Helm plugins (https://github.com/ansible-collections/community.kubernetes/pull/154). +- k8s_exec - Return rc for the command executed (https://github.com/ansible-collections/community.kubernetes/pull/158). + +Minor Changes +------------- + +- Ensure check mode results are as expected (https://github.com/ansible-collections/community.kubernetes/pull/155). +- Update base branch to 'main' (https://github.com/ansible-collections/community.kubernetes/issues/148). +- helm - Add support for K8S_AUTH_CONTEXT, K8S_AUTH_KUBECONFIG env (https://github.com/ansible-collections/community.kubernetes/pull/141). +- helm - Allow creating namespaces with Helm (https://github.com/ansible-collections/community.kubernetes/pull/157). +- helm - add aliases context for kube_context (https://github.com/ansible-collections/community.kubernetes/pull/152). +- helm - add support for K8S_AUTH_KUBECONFIG and K8S_AUTH_CONTEXT environment variable (https://github.com/ansible-collections/community.kubernetes/issues/140). 
+- helm_info - add aliases context for kube_context (https://github.com/ansible-collections/community.kubernetes/pull/152). +- helm_info - add support for K8S_AUTH_KUBECONFIG and K8S_AUTH_CONTEXT environment variable (https://github.com/ansible-collections/community.kubernetes/issues/140). +- k8s_exec - return RC for the command executed (https://github.com/ansible-collections/community.kubernetes/issues/122). +- k8s_info - Update example using vars (https://github.com/ansible-collections/community.kubernetes/pull/156). + +Security Fixes +-------------- + +- kubectl - connection plugin now redact kubectl_token and kubectl_password in console log (https://github.com/ansible-collections/community.kubernetes/issues/65). +- kubectl - redacted token and password from console log (https://github.com/ansible-collections/community.kubernetes/pull/159). + +Bugfixes +-------- + +- Test against stable ansible branch so molecule tests work (https://github.com/ansible-collections/community.kubernetes/pull/168). +- Update openshift requirements in k8s module doc (https://github.com/ansible-collections/community.kubernetes/pull/153). + +New Modules +----------- + +- helm_plugin - Manage Helm plugins +- helm_plugin_info - Gather information about Helm plugins + +v0.11.1 +======= + +Major Changes +------------- + +- Add changelog and fragments and document changelog process (https://github.com/ansible-collections/community.kubernetes/pull/131). + +Minor Changes +------------- + +- Add action groups for playbooks with module_defaults (https://github.com/ansible-collections/community.kubernetes/pull/107). +- Add requires_ansible version constraints to runtime.yml (https://github.com/ansible-collections/community.kubernetes/pull/126). +- Add sanity test ignore file for Ansible 2.11 (https://github.com/ansible-collections/community.kubernetes/pull/130). +- Add test for openshift apply bug (https://github.com/ansible-collections/community.kubernetes/pull/94). 
+- Add version_added to each new collection module (https://github.com/ansible-collections/community.kubernetes/pull/98). +- Check Python code using flake8 (https://github.com/ansible-collections/community.kubernetes/pull/123). +- Don't require project coverage check on PRs (https://github.com/ansible-collections/community.kubernetes/pull/102). +- Improve k8s Deployment and Daemonset wait conditions (https://github.com/ansible-collections/community.kubernetes/pull/35). +- Minor documentation fixes and use of FQCN in some examples (https://github.com/ansible-collections/community.kubernetes/pull/114). +- Remove action_groups_redirection entry from meta/runtime.yml (https://github.com/ansible-collections/community.kubernetes/pull/127). +- Remove deprecated ANSIBLE_METADATA field (https://github.com/ansible-collections/community.kubernetes/pull/95). +- Use FQCN in module docs and plugin examples (https://github.com/ansible-collections/community.kubernetes/pull/146). +- Use improved kubernetes diffs where possible (https://github.com/ansible-collections/community.kubernetes/pull/105). +- helm - add 'atomic' option (https://github.com/ansible-collections/community.kubernetes/pull/115). +- helm - minor code refactoring (https://github.com/ansible-collections/community.kubernetes/pull/110). +- helm_info and helm_repository - minor code refactor (https://github.com/ansible-collections/community.kubernetes/pull/117). +- k8s - Handle set object retrieved from lookup plugin (https://github.com/ansible-collections/community.kubernetes/pull/118). + +Bugfixes +-------- + +- Fix suboption docs structure for inventory plugins (https://github.com/ansible-collections/community.kubernetes/pull/103). +- Handle invalid kubeconfig parsing error (https://github.com/ansible-collections/community.kubernetes/pull/119). +- Make sure Service changes run correctly in check_mode (https://github.com/ansible-collections/community.kubernetes/pull/84). 
+- k8s_info - remove unneccessary k8s_facts deprecation notice (https://github.com/ansible-collections/community.kubernetes/pull/97). +- k8s_scale - Fix scale wait and add tests (https://github.com/ansible-collections/community.kubernetes/pull/100). +- raw - handle condition when definition is none (https://github.com/ansible-collections/community.kubernetes/pull/139). + +v0.11.0 +======= + +Major Changes +------------- + +- helm - New module for managing Helm charts (https://github.com/ansible-collections/community.kubernetes/pull/61). +- helm_info - New module for retrieving Helm chart information (https://github.com/ansible-collections/community.kubernetes/pull/61). +- helm_repository - New module for managing Helm repositories (https://github.com/ansible-collections/community.kubernetes/pull/61). + +Minor Changes +------------- + +- Rename repository to ``community.kubernetes`` (https://github.com/ansible-collections/community.kubernetes/pull/81). + +Bugfixes +-------- + +- Make sure extra files are not included in built collection (https://github.com/ansible-collections/community.kubernetes/pull/85). +- Update GitHub Actions workflow for better CI stability (https://github.com/ansible-collections/community.kubernetes/pull/78). +- k8s_log - Module no longer attempts to parse log as JSON (https://github.com/ansible-collections/community.kubernetes/pull/69). + +New Modules +----------- + +- helm - Manages Kubernetes packages with the Helm package manager +- helm_info - Get information from Helm package deployed inside the cluster +- helm_repository - Add and remove Helm repository + +v0.10.0 +======= + +Major Changes +------------- + +- k8s_exec - New module for executing commands on pods via Kubernetes API (https://github.com/ansible-collections/community.kubernetes/pull/14). +- k8s_log - New module for retrieving pod logs (https://github.com/ansible-collections/community.kubernetes/pull/16). 
+ +Minor Changes +------------- + +- k8s - Added ``persist_config`` option for persisting refreshed tokens (https://github.com/ansible-collections/community.kubernetes/issues/49). + +Security Fixes +-------------- + +- kubectl - Warn about information disclosure when using options like ``kubectl_password``, ``kubectl_extra_args``, and ``kubectl_token`` to pass data through to the command line using the ``kubectl`` connection plugin (https://github.com/ansible-collections/community.kubernetes/pull/51). + +Bugfixes +-------- + +- k8s - Add exception handling when retrieving k8s client (https://github.com/ansible-collections/community.kubernetes/pull/54). +- k8s - Fix argspec for 'elements' (https://github.com/ansible-collections/community.kubernetes/issues/13). +- k8s - Use ``from_yaml`` filter with lookup examples in ``k8s`` module documentation examples (https://github.com/ansible-collections/community.kubernetes/pull/56). +- k8s_service - Fix argspec (https://github.com/ansible-collections/community.kubernetes/issues/33). +- kubectl - Fix documentation in kubectl connection plugin (https://github.com/ansible-collections/community.kubernetes/pull/52). + +New Modules +----------- + +- k8s_exec - Execute command in Pod +- k8s_log - Fetch logs from Kubernetes resources + +v0.9.0 +====== + +Major Changes +------------- + +- k8s - Inventory source migrated from Ansible 2.9 to Kubernetes collection. +- k8s - Lookup plugin migrated from Ansible 2.9 to Kubernetes collection. +- k8s - Module migrated from Ansible 2.9 to Kubernetes collection. +- k8s_auth - Module migrated from Ansible 2.9 to Kubernetes collection. +- k8s_config_resource_name - Filter plugin migrated from Ansible 2.9 to Kubernetes collection. +- k8s_info - Module migrated from Ansible 2.9 to Kubernetes collection. +- k8s_scale - Module migrated from Ansible 2.9 to Kubernetes collection. +- k8s_service - Module migrated from Ansible 2.9 to Kubernetes collection. 
+- kubectl - Connection plugin migrated from Ansible 2.9 to Kubernetes collection. +- openshift - Inventory source migrated from Ansible 2.9 to Kubernetes collection. diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/CONTRIBUTING.md b/collections-debian-merged/ansible_collections/community/kubernetes/CONTRIBUTING.md new file mode 100644 index 00000000..0fcae83f --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/CONTRIBUTING.md @@ -0,0 +1,65 @@ +# Contributing + +## Getting Started + +General information about setting up your Python environment, testing modules, +Ansible coding styles, and more can be found in the [Ansible Community Guide]( +https://docs.ansible.com/ansible/latest/community/index.html). + + +## Kubernetes Collections + +### community.kubernetes + +This collection contains modules and plugins contributed and maintained by the Ansible Kubernetes +community. + +New modules and plugins developed by the community should be proposed to `community.kubernetes`. + +## Submitting Issues +All software has bugs, and the `community.kubernetes` collection is no exception. When you find a bug, +you can help tremendously by [telling us about it](https://github.com/ansible-collections/community.kubernetes/issues/new/choose). + +If you should discover that the bug you're trying to file already exists in an issue, +you can help by verifying the behavior of the reported bug with a comment in that +issue, or by reporting any additional information. + +## Pull Requests + +All modules MUST have integration tests for new features. +Bug fixes for modules that currently have integration tests SHOULD have tests added. +New modules should be submitted to the [community.kubernetes](https://github.com/ansible-collections/community.kubernetes) collection and MUST have integration tests. 
+ +Expected test criteria: +* Resource creation under check mode +* Resource creation +* Resource creation again (idempotency) under check mode +* Resource creation again (idempotency) +* Resource modification under check mode +* Resource modification +* Resource modification again (idempotency) under check mode +* Resource modification again (idempotency) +* Resource deletion under check mode +* Resource deletion +* Resource deletion (of a non-existent resource) under check mode +* Resource deletion (of a non-existent resource) + +Where modules have multiple parameters we recommend running through the 4-step modification cycle for each parameter the module accepts, as well as a modification cycle where as most, if not all, parameters are modified at the same time. + +For general information on running the integration tests see the +[Integration Tests page of the Module Development Guide](https://docs.ansible.com/ansible/devel/dev_guide/testing_integration.html#testing-integration), +especially the section on configuration for cloud tests. For questions about writing tests the Ansible Kubernetes community can be found on Freenode IRC as detailed below. + + +### Code of Conduct +The `community.kubernetes` collection follows the Ansible project's +[Code of Conduct](https://docs.ansible.com/ansible/devel/community/code_of_conduct.html). +Please read and familiarize yourself with this document. + +### IRC +Our IRC channels may require you to register your nickname. If you receive an error when you connect, see +[Freenode's Nickname Registration guide](https://freenode.net/kb/answer/registration) for instructions. + +The `#ansible-kubernetes` channel on Freenode IRC is the main and official place to discuss use and development of the `community.kubernetes` collection. + +For more information about Ansible's Kubernetes integration, browse the resources in the [Kubernetes Working Group](https://github.com/ansible/community/wiki/Kubernetes) Community wiki page. 
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/FILES.json b/collections-debian-merged/ansible_collections/community/kubernetes/FILES.json new file mode 100644 index 00000000..275779c5 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/FILES.json @@ -0,0 +1,1650 @@ +{ + "files": [ + { + "name": ".", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "CHANGELOG.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a29921140119406d0f539c86ab3d7e1fb441a7b1930430d76e47edc9d05cbdc6", + "format": 1 + }, + { + "name": "molecule", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "molecule/default", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "molecule/default/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "molecule/default/tasks/log.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "23263a92617fd8dd5fd502a1caf08a6f70d4ce3dd62dede7689987d9f5ddd821", + "format": 1 + }, + { + "name": "molecule/default/tasks/append_hash.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "575f3d1f8721cc64912187466ce43449a4e1efe95ef429ad61147ce319fb9849", + "format": 1 + }, + { + "name": "molecule/default/tasks/template.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "aac987b376c2f17d8c0afc8643b263f956f4460d9aae2d2972d0dc6ddf6bb2f0", + "format": 1 + }, + { + "name": "molecule/default/tasks/crd.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "64f186b377f77d620103fe86721c6efd61afb87e741ce1d9f2495cdf63a36fd3", + "format": 1 + }, + { + "name": "molecule/default/tasks/access_review.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "aa73c4a87e4a54f6f58904b793357b19e297a1db6625e6a49e5ed24181cde560", + 
"format": 1 + }, + { + "name": "molecule/default/tasks/lists.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ca624b0a3bedff164060f0d91dfeefa50306260e4ab3f60dfd0e9a4648aea76f", + "format": 1 + }, + { + "name": "molecule/default/tasks/info.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "90ddee14e8a2446288569956f5f7a6d2ae599d27cd827f872101990670439f02", + "format": 1 + }, + { + "name": "molecule/default/tasks/delete.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "462c29b6500d053414f2ea253f578a1606e065bbcf799287d31e42bbf5afd74e", + "format": 1 + }, + { + "name": "molecule/default/tasks/scale.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "da541942f4bfb46fdc482078435fc46b1e7e905e0b63f66084030f772a302e20", + "format": 1 + }, + { + "name": "molecule/default/tasks/full.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "550a80a521637c5f0a23add96874804efe7f3a08ed76197f549ffc6227977e0e", + "format": 1 + }, + { + "name": "molecule/default/tasks/cluster_info.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "abbbc11dacd0fe70d28e0302090bc1aede4074f3837660fbfff1e73613cb891b", + "format": 1 + }, + { + "name": "molecule/default/tasks/apply.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9253b9853bde8d8ded9dcd64d6d61af408ff8dc6872b6c2dbf4b4f784fa87c46", + "format": 1 + }, + { + "name": "molecule/default/tasks/rollback.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "669597125d9825b4c13c4178c6f6b09a93434a438df0bc1b9064535832d3dcdf", + "format": 1 + }, + { + "name": "molecule/default/tasks/exec.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4de0c8f46e8103577975ce795e7577754cc8110cb4965021524ce11b7b19a110", + "format": 1 + }, + { + "name": "molecule/default/tasks/waiter.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"14f04ddc6da174a3eb11cdc9ae198d6fe624cb4a3764b58247f8843467d16923", + "format": 1 + }, + { + "name": "molecule/default/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "420a4208b2194c7f33d4ceb5998128a9109511a5e63631d2133ff782e7c7a205", + "format": 1 + }, + { + "name": "molecule/default/roles", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "molecule/default/roles/helm", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "molecule/default/roles/helm/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "molecule/default/roles/helm/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5678696fb132c235483923bd99ccd511a3620b5ee9bc0aa93c185bcad61517b1", + "format": 1 + }, + { + "name": "molecule/default/roles/helm/tasks/tests_repository.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "75b448cbb5f62cc60b6a841f40ee99306fb74130775154e3135964eb33ba8e78", + "format": 1 + }, + { + "name": "molecule/default/roles/helm/tasks/test_helm_not_installed.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ae35d2ed9175610f2fd284379c50c133a0ff87dce6a27c7ec4b98bd948e8121f", + "format": 1 + }, + { + "name": "molecule/default/roles/helm/tasks/tests_chart.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c173fb64abcf45dcf7c9891d5f86f620ce8125871f53aebe4d54bdd5ef3c6bb1", + "format": 1 + }, + { + "name": "molecule/default/roles/helm/tasks/tests_helm_plugin.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "05c0e0c272a8be3e20762d1bfa0cbedb8f0d0449e8ecd0d65da35fffaaed966c", + "format": 1 + }, + { + "name": "molecule/default/roles/helm/tasks/install.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "54919db25f314de8e80b1cc800f6a3e32f8170a7f2165122f062bb1acfe92b40", + "format": 1 + 
}, + { + "name": "molecule/default/roles/helm/tasks/run_test.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fb283e96bf6b73dfe02ae10ef6582dd49effdee1391643f13efef24192db71c0", + "format": 1 + }, + { + "name": "molecule/default/roles/helm/tasks/tests_chart", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "molecule/default/roles/helm/tasks/tests_chart/from_local_path.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a144bc4698cd47501438fc39922c612093eb7e6c451b67b6d7718b70c89ba276", + "format": 1 + }, + { + "name": "molecule/default/roles/helm/tasks/tests_chart/from_url.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5f34e46f354aac707e5e949e23259fdb603ed0872e09c3843aa487e16a758dd3", + "format": 1 + }, + { + "name": "molecule/default/roles/helm/tasks/tests_chart/from_repository.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c94590cf48449d877f5e3628d2e66b1b41971b06dcd59aa0b7eba5eb62468228", + "format": 1 + }, + { + "name": "molecule/default/roles/helm/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "molecule/default/roles/helm/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2760807cfc7a5ca3810b422935e7003c3a64d78ff52716504cd779aec0935433", + "format": 1 + }, + { + "name": "molecule/default/roles/helm/files", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "molecule/default/roles/helm/files/test-chart", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "molecule/default/roles/helm/files/test-chart/Chart.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b1d6b7aaa7b3de76b01f935c002b06f7897477042f872aa732d9847894a2e6af", + "format": 1 + }, + { + "name": "molecule/default/roles/helm/files/appversionless-chart", 
+ "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "molecule/default/roles/helm/files/appversionless-chart/Chart.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fa6d86a1333abbc0bce9320952b49ceeda05fed029ab498c2bc9633b0507d62a", + "format": 1 + }, + { + "name": "molecule/default/roles/helm/files/values.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dcaf3f49ec12d4365ac0b9dba73255eeba9fea38895c7e63dc6e1ea0fe1fdb02", + "format": 1 + }, + { + "name": "molecule/default/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0611d2c99e62008a510bc138a6433787a361cc91d77a304275ee37cb136868a0", + "format": 1 + }, + { + "name": "molecule/default/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "molecule/default/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4e04f9731d67f2929f59059ce9699b84c5f507a698f9002c13c2d6d193cad63a", + "format": 1 + }, + { + "name": "molecule/default/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "molecule/default/templates/pod_template_one.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "613b03132f88ef889b5218d8a67d233e06ea9c7cc3a2fb89837484a67735b7e5", + "format": 1 + }, + { + "name": "molecule/default/templates/pod_template_two.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8b836875751176a2fc898eba9eb216ae9e6da913762ca929c8f9a94846d3e48c", + "format": 1 + }, + { + "name": "molecule/default/templates/pod_template_three.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "09a0f5ad25f308d2d048592fb72cb486fc70f4abc41a388a36dca86d7f39a1a0", + "format": 1 + }, + { + "name": "Makefile", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "063d187f2c078025eebbb547fc6d427d42196ea3f752e35d01643ebf5418bbf6", + 
"format": 1 + }, + { + "name": "CONTRIBUTING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7f120f22d94e1234a51f59d80f5dc8177e158915974ea6d9ba49b71e087c2cfd", + "format": 1 + }, + { + "name": "setup.cfg", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "125fefc5c9470b8e72cffc06937c30e2bc073f4ca6c1a593f131a6e1fd76edf2", + "format": 1 + }, + { + "name": "requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "109a92cd78d59ea7c4b4ecba8f0b84994f9fca91a0c96bd4023a362926506f10", + "format": 1 + }, + { + "name": "plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/inventory", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/inventory/k8s.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "25d9aa05596f935a33c15e0e8ea4ceb5ff9486e96ffa128199745bac8655710b", + "format": 1 + }, + { + "name": "plugins/inventory/openshift.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e2c18cc57098afdcbf74433612b64c793bbeb2dd43e9d8087dc7328fbf9a04e5", + "format": 1 + }, + { + "name": "plugins/doc_fragments", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/doc_fragments/k8s_wait_options.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3e8bb59ecfb31e6b439e07174e9df4e6b8295b6265013d95d6529073effb6fba", + "format": 1 + }, + { + "name": "plugins/doc_fragments/k8s_state_options.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9d2423ecb2a56d60aa1a4cdeafc979cf8b355c81a940ac1ad5572cfa5424bcf0", + "format": 1 + }, + { + "name": "plugins/doc_fragments/k8s_auth_options.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2245a1594eca3bec68c8e6a6bde0bd112ac03680091b14c103b89926aee6dc4a", + "format": 1 + }, + { + "name": 
"plugins/doc_fragments/k8s_scale_options.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d96c02ea5d432755b4c360787afdad456fc8e28e5de3badbf8ef595cfb7f7b6f", + "format": 1 + }, + { + "name": "plugins/doc_fragments/k8s_name_options.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a5991e5295127033b7a62fa413a5e7f1c5ae0c563575bc196fd05af71d6e474e", + "format": 1 + }, + { + "name": "plugins/doc_fragments/k8s_resource_options.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a36c38c9234310340e8c91a152ccd2f7a2f410593903f476cf1eea4b143ed43c", + "format": 1 + }, + { + "name": "plugins/doc_fragments/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/doc_fragments/helm_common_options.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8e327fe5fd055236f83ba2a298fd0ec64d3e4a12cca7a3f6626404c79164bea6", + "format": 1 + }, + { + "name": "plugins/lookup", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/lookup/k8s.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8c9d27f55bcd9b87d08c380ab0f90ca2972ba0ff007f965cbae3073c441a96ee", + "format": 1 + }, + { + "name": "plugins/connection", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/connection/kubectl.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "042d741b702fff0405858fa00331835b12f549c2f2dd644592c05eeab5d59893", + "format": 1 + }, + { + "name": "plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/scale.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "34b5a52127a1fee4d6be9fbfd659fe89f6ae1bbe79e699e17bc5e098bcb9229a", + "format": 1 + }, + { + 
"name": "plugins/module_utils/common.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "608451ede6ab959540c3efc2fdbbe5f60f0621b86c0b58256e2f997b9d340fc5", + "format": 1 + }, + { + "name": "plugins/module_utils/raw.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e7cd1d41a1d027ef37b3ca9cdcc718629d8072d6a2d5071fc1e53c1c053485fc", + "format": 1 + }, + { + "name": "plugins/module_utils/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/modules/helm_plugin.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "63bdb0acc8b5596ac197d8b4eae4e4e739ea48811d99efa4c222a7fe0c92db77", + "format": 1 + }, + { + "name": "plugins/modules/k8s_auth.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cdcd1898d9946bf80f14cad442319e9c3fd79a9ddd6cf4a856170526a425ad72", + "format": 1 + }, + { + "name": "plugins/modules/helm_repository.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cdb4c98ae55dd5bdc4b3026c06a2fbf2211e7ba02c56780480bdaec3a426c46f", + "format": 1 + }, + { + "name": "plugins/modules/k8s_scale.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "da4f3ffa85bb49e8969ee2947d0236dd73480cec902409d1744729e67a2508ec", + "format": 1 + }, + { + "name": "plugins/modules/k8s.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec3d9a412f0bee0c44543eb658bda994e69fa69b28313a0843a1c7b0a88682e8", + "format": 1 + }, + { + "name": "plugins/modules/k8s_service.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8ab6a81bc8110297509bd1dc95369923d592bb4bd8cd1cbcec378e29d4be2bb9", + "format": 1 + }, + { + "name": "plugins/modules/helm_plugin_info.py", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "c5e7f031db1623097f390283aa7e21ed782689234f9f19fd34f685ff2d59396a", + "format": 1 + }, + { + "name": "plugins/modules/k8s_log.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6e9c02d44ee7f0c94360c7df18dcae62d4fbfbc966f5d29a98c2e185bb3809c6", + "format": 1 + }, + { + "name": "plugins/modules/k8s_cluster_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e03422d0dfe1e5de705c8255ab857e0c8247c5c4cb5212d253ef2a3cb76e636d", + "format": 1 + }, + { + "name": "plugins/modules/k8s_exec.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c38c2e9cfb63581142e4a8fecfcd142edbcbf7fe458bc514700b7342c4104b7b", + "format": 1 + }, + { + "name": "plugins/modules/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/modules/k8s_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "820ad369c58e7fa106b250b2e662b4b8b6f5ee817df0b844c556c7dfe3e15568", + "format": 1 + }, + { + "name": "plugins/modules/helm_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9ab33c3277dc864295b08177431192da6fb0ba85887bb7741c11ceddfc662455", + "format": 1 + }, + { + "name": "plugins/modules/k8s_rollback.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "910a440deac4660325faabd5de013542b912e7ee43d9de2536e8b78c590d528b", + "format": 1 + }, + { + "name": "plugins/modules/helm.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "acddb9865215a677c0a0add08125078bcb29cbea5ffef6e95007b5af84c2f73c", + "format": 1 + }, + { + "name": "plugins/filter", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/filter/k8s.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "612e8d37d9c47a130dab854e4c106440dd0344512477b4f67c7dcffa4df9d810", + 
"format": 1 + }, + { + "name": "plugins/action", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/action/helm_plugin.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "47d7510ddd65c19163dd50981061e3d2a5acfdc711f3e5a64957ef993731211f", + "format": 1 + }, + { + "name": "plugins/action/k8s_auth.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "47d7510ddd65c19163dd50981061e3d2a5acfdc711f3e5a64957ef993731211f", + "format": 1 + }, + { + "name": "plugins/action/helm_repository.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "47d7510ddd65c19163dd50981061e3d2a5acfdc711f3e5a64957ef993731211f", + "format": 1 + }, + { + "name": "plugins/action/k8s_scale.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "47d7510ddd65c19163dd50981061e3d2a5acfdc711f3e5a64957ef993731211f", + "format": 1 + }, + { + "name": "plugins/action/k8s.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "47d7510ddd65c19163dd50981061e3d2a5acfdc711f3e5a64957ef993731211f", + "format": 1 + }, + { + "name": "plugins/action/k8s_service.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "47d7510ddd65c19163dd50981061e3d2a5acfdc711f3e5a64957ef993731211f", + "format": 1 + }, + { + "name": "plugins/action/helm_plugin_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "47d7510ddd65c19163dd50981061e3d2a5acfdc711f3e5a64957ef993731211f", + "format": 1 + }, + { + "name": "plugins/action/k8s_log.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "47d7510ddd65c19163dd50981061e3d2a5acfdc711f3e5a64957ef993731211f", + "format": 1 + }, + { + "name": "plugins/action/k8s_exec.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "47d7510ddd65c19163dd50981061e3d2a5acfdc711f3e5a64957ef993731211f", + "format": 1 + }, + { + "name": "plugins/action/k8s_info.py", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "47d7510ddd65c19163dd50981061e3d2a5acfdc711f3e5a64957ef993731211f", + "format": 1 + }, + { + "name": "plugins/action/helm_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "47d7510ddd65c19163dd50981061e3d2a5acfdc711f3e5a64957ef993731211f", + "format": 1 + }, + { + "name": "plugins/action/helm.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "47d7510ddd65c19163dd50981061e3d2a5acfdc711f3e5a64957ef993731211f", + "format": 1 + }, + { + "name": "codecov.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "caa848a2e02be5014890c5cbc7727e9a00d40394637c90886eb813d60f82c9c3", + "format": 1 + }, + { + "name": "README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5a9851baf54c0b0ee25b22076a919d8c1edfda1c0160d5409b85d8fe9d916235", + "format": 1 + }, + { + "name": ".github", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/stale.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "923b49f6fb8b325ea890d05a42537b3f9c5aaf26b64a704c0fef4b696aa6a4bb", + "format": 1 + }, + { + "name": ".github/workflows", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/workflows/ci.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5123223dda2aa5d78ac904231712c44e50688cd3f34ecf3877f7ab83baf64f68", + "format": 1 + }, + { + "name": ".yamllint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "13e808f3ce12e7a20a091bdb65fc1c2a5802dfaad85527639e62bcce84f8efb8", + "format": 1 + }, + { + "name": "LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8b1ba204bb69a0ade2bfcf65ef294a920f6bb361b317dba43c7ef29d96332b9b", + "format": 1 + }, + { + "name": ".gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0b09d8cd3d45dc083718918038a51893eb1d463fdb637301b8d0786ce13f7906", + 
"format": 1 + }, + { + "name": "meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "meta/runtime.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f479f01c695fedc3d752888c848c4aeb4cbbe5202e13d36692d437e31b48ba3a", + "format": 1 + }, + { + "name": "changelogs", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs/changelog.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2b9a32fac1fae6112e4961097b9a5f3a26c8bfb1cb67b85e728871e7ff6f66df", + "format": 1 + }, + { + "name": "changelogs/config.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6f6bac3cb1d4c9b34f6925dfbc7420ed483674db59d4c88488cbfddf41aa1563", + "format": 1 + }, + { + "name": "changelogs/fragments", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs/fragments/180_raw_handle_exception.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7549ddb6dbbcdb638abaa5c4f58ef02e11877c557aaffe6c810fd5795ee1a0ee", + "format": 1 + }, + { + "name": "changelogs/fragments/94-openshift-apply-test.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "69b42c868864b3e136a8f270d3f527b57f4a56f097ee3c8d45ed1c71eb4f4441", + "format": 1 + }, + { + "name": "changelogs/fragments/211_execution-env-meta.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0cc27d574e452d6918209337eec444d1b3e0f5e9eaca90ba50ffbc41e127ad2b", + "format": 1 + }, + { + "name": "changelogs/fragments/191_contributing.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f4179d573a2af58250206ac41784e4c25b7a488fb8b817caa8c7a33a37fb174c", + "format": 1 + }, + { + "name": "changelogs/fragments/197_downstream-makefile.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "49634b5e9d1c6c37c906c81b7435d8c04988a81bbf4eb1646a1886584e126d3e", + 
"format": 1 + }, + { + "name": "changelogs/fragments/106-helm_replace.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a2efc790b8c0a25fd746f6de8abf91542cefe170859f01d84edf75d6113162d4", + "format": 1 + }, + { + "name": "changelogs/fragments/56-k8s-from_yaml-docs-examples.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a3b16ae0604e5a1a9aef75d5b7c514f5930e1679a6c95b7118d0c804168027b5", + "format": 1 + }, + { + "name": "changelogs/fragments/227_replace-kubernetesansiblemodule-class.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0cb44a775d2bd0a3000e1b7f0df4e1f91ca3dec46dc3a2c7b50d65654a881bb6", + "format": 1 + }, + { + "name": "changelogs/fragments/193_vault-kubeconfig-support.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4149c46f539ae7c5d8ddb9a3363c8fb64a0de5181a13a3942452e7194f9ce707", + "format": 1 + }, + { + "name": "changelogs/fragments/235_k8s_info-wait-support.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6762f3c43b152ec734f75eee45d4b3296510cb19f9cf72fa1f36fdb1a11ce836", + "format": 1 + }, + { + "name": "changelogs/fragments/49-k8s-add-persist_config-option.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "34081234d6be47d85c9508b527de08fdeeccb9e8c32dca536e2e82a380b32f42", + "format": 1 + }, + { + "name": "changelogs/fragments/123-flake8.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "19932edd9afbd1ee82f653f176436bf6c6ae25f2d5a143f6967c6d4a48d4ddec", + "format": 1 + }, + { + "name": "changelogs/fragments/140-kubeconfig-env.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "36448e2339cccd6bd202fd58fe8bee7ee9f88cdbb58ae8d1a7dfed8676d4897d", + "format": 1 + }, + { + "name": "changelogs/fragments/202_k8s_info.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e253ed1e9d71cdba124b75d6e8eb463be7ebc5194a3266b59c0f6082f312afe7", + "format": 1 + }, + { + 
"name": "changelogs/fragments/51-kubectl-security-disclosure.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "94efa7c7ec14918b5e7385db358b6b92ec0432930fc8c32eea726381be9c5b7f", + "format": 1 + }, + { + "name": "changelogs/fragments/246_helm-appversion-check.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1a57d750afafeaba14a9a97b1dee2dbf29ef5156d14203c9c7839fce678c3c54", + "format": 1 + }, + { + "name": "changelogs/fragments/13-fix-elements-argspec.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f207aa4cec126a54848357b1b05c65e2d27600ac04bf35d55a6ae71747a53570", + "format": 1 + }, + { + "name": "changelogs/fragments/93_helm-multiple-values-files.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "74c8041e794390ac72f4fd2bce3ae413980be2873a2a0cf323899dce5070f48a", + "format": 1 + }, + { + "name": "changelogs/fragments/107-action-groups-module_defaults.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5b28d2b817fb74f3beeaa49468e4afe1310899b855e93852b5ff225b554182e3", + "format": 1 + }, + { + "name": "changelogs/fragments/146-fqcn-in-docs.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "74caf9335cce38758bc8c05812515fd1523fa071f11fda0067c3bd8ddd155540", + "format": 1 + }, + { + "name": "changelogs/fragments/61-helm-new-modules.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "31fb22896bad222fea166f93678dcfb6e38f91b82453ff04cd5fca5fd0126368", + "format": 1 + }, + { + "name": "changelogs/fragments/119-handle-kubeconfig-error.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "415e076a6c2507605d40be45fb51bf45c6c40ce4ad6409f5a6eedc1a52420aa2", + "format": 1 + }, + { + "name": "changelogs/fragments/105-improved-k8s-diffs.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a42e6e0c29188ee9889e10bb3dcfb012bc667d7ac19c54fb51742453b62a12fa", + "format": 1 + }, + { + "name": 
"changelogs/fragments/78-github-actions-workflow.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "18e2234e760c362f1b50b05d5b58bc89aec5b4cf3dbbcac672322ca03d716bbf", + "format": 1 + }, + { + "name": "changelogs/fragments/54-k8s-add-exception-handling.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "41ef5af87514c419d75616d869720313629ea3fccc9cc3908eeda63d7a813865", + "format": 1 + }, + { + "name": "changelogs/fragments/127-remove-action_groups_redirection.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "408f3c04a3f1b3f4aea22f789c0eb50f9427015a6403c1540d0588efe961a1b6", + "format": 1 + }, + { + "name": "changelogs/fragments/157-helm-create-namespace.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c504bf1bca2b2bcedf4d1ea68da318bc8863a18c43f6faf543b54a0e033aa59b", + "format": 1 + }, + { + "name": "changelogs/fragments/100-k8s_scale-fix-wait.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "29b2759084d96200fa1e0ed92e599cdbf86bdf342398696dde056d7f4428a457", + "format": 1 + }, + { + "name": "changelogs/fragments/97-remove-k8s_facts-deprecation.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1e7119958907500f35927046fd7cf4cdd2933b171e4ac9131fd936be60681af9", + "format": 1 + }, + { + "name": "changelogs/fragments/260_k8s-positional-args.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "98d9c898b933fdf7891a975d13b23d41cc7813eada594963bfb08f3f17f5cf76", + "format": 1 + }, + { + "name": "changelogs/fragments/208_set-connection-plugin-transport.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "14426d5fb77ceb0f8247368c45c1c757d6694ac2f291c913e5bee6d9259d1210", + "format": 1 + }, + { + "name": "changelogs/fragments/115-helm-add-atomic.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a3a424e4580fa2b8b667b1c779093fe5bfac220ec72d5049dbe433c148d8c0d4", + "format": 1 + }, + { + 
"name": "changelogs/fragments/148-update-base-branch-main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "64451c6f2faabf0863c2e06c226475d0eec111da962cdadd04c27e2f34075481", + "format": 1 + }, + { + "name": "changelogs/fragments/168-test-stable-ansible.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e73189d10953c3838ac4fc7305137d5aeb2bd07bcfa5fc517fa7972ea6ecb023", + "format": 1 + }, + { + "name": "changelogs/fragments/114-minor-docs-fixes.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3c97e8832896f14540db550114ef182ca02c36663c354d0137915543f619730c", + "format": 1 + }, + { + "name": "changelogs/fragments/118-k8s-lookup-handle-set-object.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "58b8af9fcec479de14c43319771ef186d3839a40b233458307024a45443287bf", + "format": 1 + }, + { + "name": "changelogs/fragments/231_k8sansiblemixin-module.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be2d4ef3df4938a95099d3764898b5592d1ef306378e1f35d1081fec0da90568", + "format": 1 + }, + { + "name": "changelogs/fragments/252_connection-plugin-fqcn-fix.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1e98a39d8c42ffd63500cc204c58d3bac2dbaab4452ad8a324e029d5fc7c43e9", + "format": 1 + }, + { + "name": "changelogs/fragments/200_kubectl_fix.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7c0cc4ab4d91fd0996801306f086d1f033d51bcc1317cc54707dbfc8e80acd4e", + "format": 1 + }, + { + "name": "changelogs/fragments/141-helm-add-k8s-env-vars.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79b252a6bc0e8396fba35b3acbab94435c684799fed6fac0eef0a6009376e74d", + "format": 1 + }, + { + "name": "changelogs/fragments/102-dont-require-codecov-check-prs.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8bc248e6d49d1afb203940acded60103cef554ec575946c1b4b0190f4b02dd7e", + "format": 1 + }, + { + 
"name": "changelogs/fragments/18_k8s_info_wait.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ac227da1e9499d28e9ac5385373125fe94b7539f92ffa6ec9a835053b6e6b6ed", + "format": 1 + }, + { + "name": "changelogs/fragments/130-add-sanity-ignore-211.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d39eb84654d37a365dba1264c450534636faa123023b8eb8a9591db1a7fc3d43", + "format": 1 + }, + { + "name": "changelogs/fragments/4-k8s-prepare-collection-for-release.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8187074ce6cd5c96b360a6094f2d8842845219ebd0c68c60fb9defe98e45d951", + "format": 1 + }, + { + "name": "changelogs/fragments/117-helm-minor-refactor.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "da73170dc3cc75ca1ed8fdce6f79fe210c071f94953598191705f7bc2bb433cc", + "format": 1 + }, + { + "name": "changelogs/fragments/122_k8s_exec_rc.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2d8a3f73ae283d47aa1dc98dddc9283625440ea205c619fc38cb3889a2a0306a", + "format": 1 + }, + { + "name": "changelogs/fragments/65_kubectl.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2c9ec2a82cc793b0d980bc1a0bcf3ada17bc433dfef9ce0bcb8144ee46c9c135", + "format": 1 + }, + { + "name": "changelogs/fragments/35-wait-conditions.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cb1bfa4cf6fc6f458cc10976fb1de8a0dc3c74eb97c6b0ab22ad4a050966a436", + "format": 1 + }, + { + "name": "changelogs/fragments/228_dont-mark-changed-if-diff-irrelevant.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bd01b899e57ccf3ce8dd30d9bc68c3f91c83ef774286ff444aa22c78cc1fcf13", + "format": 1 + }, + { + "name": "changelogs/fragments/234_k8s-selfsubjectaccessreviews.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e437c7e0648b0d3af3fda5c1682932491691d06572b6825d4b27f93052dabc9c", + "format": 1 + }, + { + "name": 
"changelogs/fragments/243_template.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f0cf997bfbee6f66194150cf18d86101ed9347c55aa584a7ac16ffd5bbe414da", + "format": 1 + }, + { + "name": "changelogs/fragments/229_helm-vault-support.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "05b46e7481599d041938fcf5217c315aea60ac6f20c24ddaef5256a90a3a3fb2", + "format": 1 + }, + { + "name": "changelogs/fragments/103-fix-inventory-docs-structure.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c18a2f010a936e2bc72f2d7ea303a9d7ecc02360c18c5fd1654be908e98a6f5e", + "format": 1 + }, + { + "name": "changelogs/fragments/156-k8s_info-vars-example.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d1e1fd14f7acd68ee24b561134fb74f569c1c7779e47b33209c9d09b7ca6c9e4", + "format": 1 + }, + { + "name": "changelogs/fragments/16-k8s_log-new-module.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "059f4bf1f128f65a3bd4d9ea6ce66d201a748586d55294be2945c88e6081cdd6", + "format": 1 + }, + { + "name": "changelogs/fragments/192_helm-status-check-mode.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "871c635ca1757f39ed77121a075372bf38ac2bda119089613cdc970ff520df83", + "format": 1 + }, + { + "name": "changelogs/fragments/81-rename-repository.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8973a949dfe02dc9ee66965ce00d03b45e8c84dd2f01376883b909b8a3499196", + "format": 1 + }, + { + "name": "changelogs/fragments/110-helm-minor-refactor.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0975f8e1501efe6d8ffef1db97bc9cd0a69a4e79cfa41b7ecf383dbaa0f0a80c", + "format": 1 + }, + { + "name": "changelogs/fragments/155-ensure-check-mode-waits.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7e205bf53447c090021ce5ce95e57c087e8fa14dcaf389ebb3556288c28dc5ef", + "format": 1 + }, + { + "name": 
"changelogs/fragments/33-k8s_service-fix-argspec.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1184faa352f5ef61ab1b59aa19279edd7dffb73b5c2776158945070245479604", + "format": 1 + }, + { + "name": "changelogs/fragments/95-remove-ANSIBLE_METADATA.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "815399b60144779355f3534f3756156216689f985fd9194db3e0e66c3abd318e", + "format": 1 + }, + { + "name": "changelogs/fragments/223_refactor-module_utils.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4381bedc72150b0b5a40449bd7a9d52dc2e1dedd658db99b74e0d25e8e1f0535", + "format": 1 + }, + { + "name": "changelogs/fragments/230_k8s-template-parameter.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0a807b348c0a13af4aa3e5ea67f853ca016a838cf7b79d9ee30b5243079a89ea", + "format": 1 + }, + { + "name": "changelogs/fragments/196_probot-stale-bot.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c37525096d44bd3e46a91e50dec7dbdf514b7d5404edb85ecbbaeab61779846d", + "format": 1 + }, + { + "name": "changelogs/fragments/52-kubectl-connection-docsfix.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f50ef4550a5d20cccd9a037308f427c9d4e28065fc9c550ab301342173fe86b8", + "format": 1 + }, + { + "name": "changelogs/fragments/98-add-version_added.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "59969ad8303eac6b0bdaf83749836c8741ed085e121c53b8cdfff2f15d317b13", + "format": 1 + }, + { + "name": "changelogs/fragments/158-k8s_exec-return-rc.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5f35ca7c5a447c99487bbba5c3c500ccf8f7c6cd89b9efd3fde8001f411817f0", + "format": 1 + }, + { + "name": "changelogs/fragments/85-exclude-unnecessary-files-when-building.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "69efbbdabb620f96098f3b93cefdf140a071507c4f022d9bbfd13c069a36f3c8", + "format": 1 + }, + { + "name": 
"changelogs/fragments/84-check_mode-service-change.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3d2e8f0d03c25e7336a0711530dfd6110916a4bcdf06ff7b6c7b27bf296badf4", + "format": 1 + }, + { + "name": "changelogs/fragments/224_handle-dynamicclient-exception.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f3bf7c7d1359f6427265d1a058729dd1569c077a38ec5773cda13d3f6f8d8d10", + "format": 1 + }, + { + "name": "changelogs/fragments/139-fix-manifest-ends-with-separator.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b6e8fce840c726a400f85d2ef474e826498727032dd7c361e0dcbd5cce0600a6", + "format": 1 + }, + { + "name": "changelogs/fragments/153-update-openshift-requirements.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef7ac82e35b15ed21d47006a8e116285eab5a3fbf905bf5885e436e513e31632", + "format": 1 + }, + { + "name": "changelogs/fragments/131-changelog-fragments.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5c5515671df29a7d9c396ebb6850f46b2df5f37dd85cdf12bbe077a5e483d031", + "format": 1 + }, + { + "name": "changelogs/fragments/159-kubectl-redact-token-and-password.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5dba154d9fcce77f1a347831aea12a88004f7e5ac6a67721822d4de97c622756", + "format": 1 + }, + { + "name": "changelogs/fragments/14-k8s_exec-new-module.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1cd01ee789a0df167d14fd7db6b0d466d3f3b957568d2bb2e3eb1611a2cde51a", + "format": 1 + }, + { + "name": "changelogs/fragments/69-k8s_log-dont-parse-as-json.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d95c9fe5d44d8ec9b3e823cd01d0fef04aca946e5125c27fa0a7f1fb8cdf23f7", + "format": 1 + }, + { + "name": "changelogs/fragments/126-requires_ansible-version-constraints.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"8a54595681599536db062913e58afa855b7f10ff0dc30ba987ba9811077f5a30", + "format": 1 + }, + { + "name": "changelogs/fragments/154-helm_plugin-helm_plugin_info-new-modules.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cc4e0c2651924a5d2e22a14958280ee4cbc338c9d22e4dbb23ded5b29f10f79e", + "format": 1 + }, + { + "name": "changelogs/fragments/152-helm-context-aliases.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c2b7a69fcc740055034283d134c441d4c255594db19dc4cf5a745f06656523b1", + "format": 1 + }, + { + "name": "bindep.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d9a17769c3c0697b8a0b59744552d7c1b174a67722c52285426d42f035f1f58f", + "format": 1 + }, + { + "name": "tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/sanity", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.11.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c06bae62022660da337757f833b58d770b5367776ab540c0291c5efaa0433891", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.10.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "27401ce1dc74f49e9b4ad2e12c97ce068047de9d79af7503e86e61ad64d02331", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.9.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb7d93cea004db5fcdf797f576b1bacbee02e0300f4e47fe992b50c3050ab1fa", + "format": 1 + }, + { + "name": "tests/integration", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/kubernetes", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/kubernetes/tasks", + "ftype": 
"dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/kubernetes/tasks/validate_installed.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "413678923be60e4485ecad94538e31111a8b8bdfb01789cbc4a6fc4c0b48c1d3", + "format": 1 + }, + { + "name": "tests/integration/targets/kubernetes/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "05401556bfe67c7fa187a6cae7927a9778d7ccf906246a72ec812175019392dd", + "format": 1 + }, + { + "name": "tests/integration/targets/kubernetes/tasks/setup_remote_tmp_dir.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "489ccbebe22f66827ea2ddf71902e8a5122eb47dea0d0ae18ba1d3f93cbf67ae", + "format": 1 + }, + { + "name": "tests/integration/targets/kubernetes/tasks/openshift.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec7abdfc35e65b849a8ed8e7d2dd271baad47930c70771db59a52b47691fa0e2", + "format": 1 + }, + { + "name": "tests/integration/targets/kubernetes/tasks/validate_not_installed.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d297104a468f78ea77c9d8580f58773632cdbb227b43043632b79d75fb751de8", + "format": 1 + }, + { + "name": "tests/integration/targets/kubernetes/tasks/older_openshift_fail.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9f67dcd50668070a300b458ce87c5424b8a57f9ed13b823a1b7c53aa899ff0d2", + "format": 1 + }, + { + "name": "tests/integration/targets/kubernetes/library", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/kubernetes/library/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "94a73d0d4b8e6796159ef3bacd51868a11f5b684d3bb719d935123ff61259816", + "format": 1 + }, + { + "name": "tests/integration/targets/kubernetes/library/test_tempfile.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"efc92ba8bb66458bb67d33d65d0d596713bb2834ff460bf8a24de78a2d58dd52", + "format": 1 + }, + { + "name": "tests/integration/targets/kubernetes/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/kubernetes/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "286beb8d1ffb87c0bc085b23cb5e0c250fd205174c12245764925b0d6227aa2e", + "format": 1 + }, + { + "name": "tests/integration/targets/kubernetes/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9e7409bbd4fae71733ca025b5b1c8f062b184989a7af4abe322cbea0881e146e", + "format": 1 + }, + { + "name": "tests/integration/targets/kubernetes/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6cdcf35af12c60c96ac2bb52cfa492936962ae3c8a2bdc174a8c7b052c7ccf99", + "format": 1 + }, + { + "name": "tests/integration/targets/kubernetes/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/kubernetes/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", + "format": 1 + }, + { + "name": "tests/integration/targets/kubernetes/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/kubernetes/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "33dbdc47d3594105cbe82d177fe852a3b85c9a72d7bbc342580d8ebdb604209c", + "format": 1 + }, + { + "name": "tests/integration/targets/kubernetes/files", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/kubernetes/files/setup-crd.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec1fd5a8731fd07a20799b6b84aa815aa2ded58b2107bef808d96dfe58137d44", + "format": 1 + }, + { 
+ "name": "tests/integration/targets/kubernetes/files/crd-resource.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4921362ac3c4afac5f42ebb90b37bcb75e1fe20929bb0e45d0df4c190d28f577", + "format": 1 + }, + { + "name": "tests/integration/targets/kubernetes/files/kuard-extra-property.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "69a69ba619f5b57a5ea0efbe038a4525bb3d75a2f6765635834ffa266d49e640", + "format": 1 + }, + { + "name": "tests/integration/targets/kubernetes/files/kuard-invalid-type.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1933ad2286d677b22ad9b8aa45c185ee1114529d24b4f915f58f9b254a75b3fd", + "format": 1 + }, + { + "name": "utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "utils/downstream.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6b1983dd817148d81a4a781e70f6c4faddd612ab080f1b23d34636848b30bf23", + "format": 1 + } + ], + "format": 1 +}
\ No newline at end of file diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/LICENSE b/collections-debian-merged/ansible_collections/community/kubernetes/LICENSE new file mode 100644 index 00000000..e72bfdda --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. 
+ + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. 
+ + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. 
If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>.
\ No newline at end of file diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/MANIFEST.json b/collections-debian-merged/ansible_collections/community/kubernetes/MANIFEST.json new file mode 100644 index 00000000..fe43a813 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/MANIFEST.json @@ -0,0 +1,43 @@ +{ + "collection_info": { + "namespace": "community", + "name": "kubernetes", + "version": "1.1.1", + "authors": [ + "chouseknecht (https://github.com/chouseknecht)", + "geerlingguy (https://www.jeffgeerling.com/)", + "maxamillion (https://github.com/maxamillion)", + "jmontleon (https://github.com/jmontleon)", + "fabianvf (https://github.com/fabianvf)", + "willthames (https://github.com/willthames)", + "mmazur (https://github.com/mmazur)", + "jamescassell (https://github.com/jamescassell)" + ], + "readme": "README.md", + "tags": [ + "kubernetes", + "k8s", + "cloud", + "infrastructure", + "openshift", + "okd", + "cluster" + ], + "description": "Kubernetes Collection for Ansible.", + "license": [], + "license_file": "LICENSE", + "dependencies": {}, + "repository": "https://github.com/ansible-collections/community.kubernetes", + "documentation": "", + "homepage": "", + "issues": "https://github.com/ansible-collections/community.kubernetes/issues" + }, + "file_manifest_file": { + "name": "FILES.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fb463db674458f47f7c5163296db1667c97bff087ac491724e6ce11e8e1f04c8", + "format": 1 + }, + "format": 1 +}
\ No newline at end of file diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/Makefile b/collections-debian-merged/ansible_collections/community/kubernetes/Makefile new file mode 100644 index 00000000..26e675fe --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/Makefile @@ -0,0 +1,43 @@ +# Also needs to be updated in galaxy.yml +VERSION = 1.1.1 + +TEST_ARGS ?= "" +PYTHON_VERSION ?= `python -c 'import platform; print("{0}.{1}".format(platform.python_version_tuple()[0], platform.python_version_tuple()[1]))'` + +clean: + rm -f community-kubernetes-${VERSION}.tar.gz + rm -rf ansible_collections + rm -rf tests/output + +build: clean + ansible-galaxy collection build + +release: build + ansible-galaxy collection publish community-kubernetes-${VERSION}.tar.gz + +install: build + ansible-galaxy collection install -p ansible_collections community-kubernetes-${VERSION}.tar.gz + +test-sanity: + ansible-test sanity --docker -v --color --python $(PYTHON_VERSION) $(?TEST_ARGS) + +test-integration: + ansible-test integration --docker -v --color --retry-on-error --python $(PYTHON_VERSION) --continue-on-error --diff --coverage $(?TEST_ARGS) + +test-molecule: + molecule test + +downstream-test-sanity: + ./utils/downstream.sh -s + +downstream-test-integration: + ./utils/downstream.sh -i + +downstream-test-molecule: + ./utils/downstream.sh -m + +downstream-build: + ./utils/downstream.sh -b + +downstream-release: + ./utils/downstream.sh -r diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/README.md b/collections-debian-merged/ansible_collections/community/kubernetes/README.md new file mode 100644 index 00000000..a9561178 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/README.md @@ -0,0 +1,187 @@ +# Kubernetes Collection for Ansible + 
+[![CI](https://github.com/ansible-collections/community.kubernetes/workflows/CI/badge.svg?event=push)](https://github.com/ansible-collections/community.kubernetes/actions) [![Codecov](https://img.shields.io/codecov/c/github/ansible-collections/community.kubernetes)](https://codecov.io/gh/ansible-collections/community.kubernetes) + +This repo hosts the `community.kubernetes` (a.k.a. `kubernetes.core`) Ansible Collection. + +The collection includes a variety of Ansible content to help automate the management of applications in Kubernetes and OpenShift clusters, as well as the provisioning and maintenance of clusters themselves. + +## Included content + +Click on the name of a plugin or module to view that content's documentation: + + - **Connection Plugins**: + - [kubectl](https://docs.ansible.com/ansible/2.10/collections/community/kubernetes/kubectl_connection.html) + - **Filter Plugins**: + - [k8s_config_resource_name](https://docs.ansible.com/ansible/latest/user_guide/playbooks_filters.html#kubernetes-filters) + - **Inventory Source**: + - [k8s](https://docs.ansible.com/ansible/2.10/collections/community/kubernetes/k8s_inventory.html) + - [openshift](https://docs.ansible.com/ansible/2.10/collections/community/kubernetes/openshift_inventory.html) + - **Lookup Plugins**: + - [k8s](https://docs.ansible.com/ansible/2.10/collections/community/kubernetes/k8s_lookup.html) + - **Modules**: + - [k8s](https://docs.ansible.com/ansible/2.10/collections/community/kubernetes/k8s_module.html) + - [k8s_auth](https://docs.ansible.com/ansible/2.10/collections/community/kubernetes/k8s_auth_module.html) + - [k8s_cluster_info](https://github.com/ansible-collections/community.kubernetes/blob/main/plugins/modules/k8s_cluster_info.py) + - [k8s_exec](https://docs.ansible.com/ansible/2.10/collections/community/kubernetes/k8s_exec_module.html) + - [k8s_info](https://docs.ansible.com/ansible/2.10/collections/community/kubernetes/k8s_info_module.html) + - 
[k8s_log](https://docs.ansible.com/ansible/2.10/collections/community/kubernetes/k8s_log_module.html) + - [k8s_scale](https://docs.ansible.com/ansible/2.10/collections/community/kubernetes/k8s_scale_module.html) + - [k8s_service](https://docs.ansible.com/ansible/2.10/collections/community/kubernetes/k8s_service_module.html) + - [helm](https://docs.ansible.com/ansible/2.10/collections/community/kubernetes/helm_module.html) + - [helm_info](https://docs.ansible.com/ansible/2.10/collections/community/kubernetes/helm_info_module.html) + - [helm_plugin](https://github.com/ansible-collections/community.kubernetes/blob/main/plugins/modules/helm_plugin.py) + - [helm_plugin_info](https://github.com/ansible-collections/community.kubernetes/blob/main/plugins/modules/helm_plugin_info.py) + - [helm_repository](https://docs.ansible.com/ansible/2.10/collections/community/kubernetes/helm_repository_module.html) + +## Installation and Usage + +### Installing the Collection from Ansible Galaxy + +Before using the Kubernetes collection, you need to install it with the Ansible Galaxy CLI: + + ansible-galaxy collection install community.kubernetes + +You can also include it in a `requirements.yml` file and install it via `ansible-galaxy collection install -r requirements.yml`, using the format: + +```yaml +--- +collections: + - name: community.kubernetes + version: 1.1.1 +``` + +### Installing the OpenShift Python Library + +Content in this collection requires the [OpenShift Python client](https://pypi.org/project/openshift/) to interact with Kubernetes' APIs. You can install it with: + + pip3 install openshift + +### Using modules from the Kubernetes Collection in your playbooks + +It's preferable to use content in this collection using their Fully Qualified Collection Namespace (FQCN), for example `community.kubernetes.k8s_info`: + +```yaml +--- +- hosts: localhost + gather_facts: false + connection: local + + tasks: + - name: Ensure the myapp Namespace exists. 
+ community.kubernetes.k8s: + api_version: v1 + kind: Namespace + name: myapp + state: present + + - name: Ensure the myapp Service exists in the myapp Namespace. + community.kubernetes.k8s: + state: present + definition: + apiVersion: v1 + kind: Service + metadata: + name: myapp + namespace: myapp + spec: + type: LoadBalancer + ports: + - port: 8080 + targetPort: 8080 + selector: + app: myapp + + - name: Get a list of all Services in the myapp namespace. + community.kubernetes.k8s_info: + kind: Service + namespace: myapp + register: myapp_services + + - name: Display number of Services in the myapp namespace. + debug: + var: myapp_services.resources | count +``` + +If upgrading older playbooks which were built prior to Ansible 2.10 and this collection's existence, you can also define `collections` in your play and refer to this collection's modules as you did in Ansible 2.9 and below, as in this example: + +```yaml +--- +- hosts: localhost + gather_facts: false + connection: local + + collections: + - community.kubernetes + + tasks: + - name: Ensure the myapp Namespace exists. + k8s: + api_version: v1 + kind: Namespace + name: myapp + state: present +``` + +For documentation on how to use individual modules and other content included in this collection, please see the links in the 'Included content' section earlier in this README. + +## Testing and Development + +If you want to develop new content for this collection or improve what's already here, the easiest way to work on the collection is to clone it into one of the configured [`COLLECTIONS_PATHS`](https://docs.ansible.com/ansible/latest/reference_appendices/config.html#collections-paths), and work on it there. + +See [Contributing to community.kubernetes](CONTRIBUTING.md). + +### Testing with `ansible-test` + +The `tests` directory contains configuration for running sanity and integration tests using [`ansible-test`](https://docs.ansible.com/ansible/latest/dev_guide/testing_integration.html). 
+ +You can run the collection's test suites with the commands: + + make test-sanity + make test-integration + +### Testing with `molecule` + +There are also integration tests in the `molecule` directory which are meant to be run against a local Kubernetes cluster, e.g. using [KinD](https://kind.sigs.k8s.io) or [Minikube](https://minikube.sigs.k8s.io). To setup a local cluster using KinD and run Molecule: + + kind create cluster + make test-molecule + +## Publishing New Versions + +Releases are automatically built and pushed to Ansible Galaxy for any new tag. Before tagging a release, make sure to do the following: + + 1. Update the version in the following places: + a. The `version` in `galaxy.yml` + b. This README's `requirements.yml` example + c. The `DOWNSTREAM_VERSION` in `utils/downstream.sh` + d. The `VERSION` in `Makefile` + 1. Update the CHANGELOG: + 1. Make sure you have [`antsibull-changelog`](https://pypi.org/project/antsibull-changelog/) installed. + 1. Make sure there are fragments for all known changes in `changelogs/fragments`. + 1. Run `antsibull-changelog release`. + 1. Commit the changes and create a PR with the changes. Wait for tests to pass, then merge it once they have. + 1. Tag the version in Git and push to GitHub. + 1. Manually build and release the `kubernetes.core` collection (see following section). + +After the version is published, verify it exists on the [Kubernetes Collection Galaxy page](https://galaxy.ansible.com/community/kubernetes). + +### Publishing `kubernetes.core` + +Until the contents of repository are moved into a new `kubernetes.core` repository on GitHub, this repository is the source of both the `kubernetes.core` and `community.kubernetes` repositories on Ansible Galaxy. + +To publish the `kubernetes.core` collection on Ansible Galaxy, do the following: + + 1. Run `make downstream-release` (on macOS, add `LC_ALL=C` before the command). 
+ +The process for uploading a supported release to Automation Hub is documented separately. + +## More Information + +For more information about Ansible's Kubernetes integration, join the `#ansible-kubernetes` channel on Freenode IRC, and browse the resources in the [Kubernetes Working Group](https://github.com/ansible/community/wiki/Kubernetes) Community wiki page. + +## License + +GNU General Public License v3.0 or later + +See LICENCE to see the full text. diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/bindep.txt b/collections-debian-merged/ansible_collections/community/kubernetes/bindep.txt new file mode 100644 index 00000000..3f85228b --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/bindep.txt @@ -0,0 +1 @@ +kubernetes-client [platform:fedora] diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/changelog.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/changelog.yaml new file mode 100644 index 00000000..17ba48db --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/changelog.yaml @@ -0,0 +1,254 @@ +ancestor: null +releases: + 0.10.0: + changes: + bugfixes: + - k8s - Add exception handling when retrieving k8s client (https://github.com/ansible-collections/community.kubernetes/pull/54). + - k8s - Fix argspec for 'elements' (https://github.com/ansible-collections/community.kubernetes/issues/13). + - k8s - Use ``from_yaml`` filter with lookup examples in ``k8s`` module documentation + examples (https://github.com/ansible-collections/community.kubernetes/pull/56). + - k8s_service - Fix argspec (https://github.com/ansible-collections/community.kubernetes/issues/33). + - kubectl - Fix documentation in kubectl connection plugin (https://github.com/ansible-collections/community.kubernetes/pull/52). 
+ major_changes: + - k8s_exec - New module for executing commands on pods via Kubernetes API (https://github.com/ansible-collections/community.kubernetes/pull/14). + - k8s_log - New module for retrieving pod logs (https://github.com/ansible-collections/community.kubernetes/pull/16). + minor_changes: + - k8s - Added ``persist_config`` option for persisting refreshed tokens (https://github.com/ansible-collections/community.kubernetes/issues/49). + security_fixes: + - kubectl - Warn about information disclosure when using options like ``kubectl_password``, + ``kubectl_extra_args``, and ``kubectl_token`` to pass data through to the + command line using the ``kubectl`` connection plugin (https://github.com/ansible-collections/community.kubernetes/pull/51). + fragments: + - 13-fix-elements-argspec.yaml + - 14-k8s_exec-new-module.yaml + - 16-k8s_log-new-module.yaml + - 33-k8s_service-fix-argspec.yaml + - 49-k8s-add-persist_config-option.yaml + - 51-kubectl-security-disclosure.yaml + - 52-kubectl-connection-docsfix.yaml + - 54-k8s-add-exception-handling.yaml + - 56-k8s-from_yaml-docs-examples.yaml + modules: + - description: Execute command in Pod + name: k8s_exec + namespace: '' + - description: Fetch logs from Kubernetes resources + name: k8s_log + namespace: '' + release_date: '2020-03-23' + 0.11.0: + changes: + bugfixes: + - Make sure extra files are not included in built collection (https://github.com/ansible-collections/community.kubernetes/pull/85). + - Update GitHub Actions workflow for better CI stability (https://github.com/ansible-collections/community.kubernetes/pull/78). + - k8s_log - Module no longer attempts to parse log as JSON (https://github.com/ansible-collections/community.kubernetes/pull/69). + major_changes: + - helm - New module for managing Helm charts (https://github.com/ansible-collections/community.kubernetes/pull/61). 
+ - helm_info - New module for retrieving Helm chart information (https://github.com/ansible-collections/community.kubernetes/pull/61). + - helm_repository - New module for managing Helm repositories (https://github.com/ansible-collections/community.kubernetes/pull/61). + minor_changes: + - Rename repository to ``community.kubernetes`` (https://github.com/ansible-collections/community.kubernetes/pull/81). + fragments: + - 61-helm-new-modules.yaml + - 69-k8s_log-dont-parse-as-json.yaml + - 78-github-actions-workflow.yaml + - 81-rename-repository.yaml + - 85-exclude-unnecessary-files-when-building.yaml + modules: + - description: Manages Kubernetes packages with the Helm package manager + name: helm + namespace: '' + - description: Get information from Helm package deployed inside the cluster + name: helm_info + namespace: '' + - description: Add and remove Helm repository + name: helm_repository + namespace: '' + release_date: '2020-05-04' + 0.11.1: + changes: + bugfixes: + - Fix suboption docs structure for inventory plugins (https://github.com/ansible-collections/community.kubernetes/pull/103). + - Handle invalid kubeconfig parsing error (https://github.com/ansible-collections/community.kubernetes/pull/119). + - Make sure Service changes run correctly in check_mode (https://github.com/ansible-collections/community.kubernetes/pull/84). + - k8s_info - remove unneccessary k8s_facts deprecation notice (https://github.com/ansible-collections/community.kubernetes/pull/97). + - k8s_scale - Fix scale wait and add tests (https://github.com/ansible-collections/community.kubernetes/pull/100). + - raw - handle condition when definition is none (https://github.com/ansible-collections/community.kubernetes/pull/139). + major_changes: + - Add changelog and fragments and document changelog process (https://github.com/ansible-collections/community.kubernetes/pull/131). 
+ minor_changes: + - Add action groups for playbooks with module_defaults (https://github.com/ansible-collections/community.kubernetes/pull/107). + - Add requires_ansible version constraints to runtime.yml (https://github.com/ansible-collections/community.kubernetes/pull/126). + - Add sanity test ignore file for Ansible 2.11 (https://github.com/ansible-collections/community.kubernetes/pull/130). + - Add test for openshift apply bug (https://github.com/ansible-collections/community.kubernetes/pull/94). + - Add version_added to each new collection module (https://github.com/ansible-collections/community.kubernetes/pull/98). + - Check Python code using flake8 (https://github.com/ansible-collections/community.kubernetes/pull/123). + - Don't require project coverage check on PRs (https://github.com/ansible-collections/community.kubernetes/pull/102). + - Improve k8s Deployment and Daemonset wait conditions (https://github.com/ansible-collections/community.kubernetes/pull/35). + - Minor documentation fixes and use of FQCN in some examples (https://github.com/ansible-collections/community.kubernetes/pull/114). + - Remove action_groups_redirection entry from meta/runtime.yml (https://github.com/ansible-collections/community.kubernetes/pull/127). + - Remove deprecated ANSIBLE_METADATA field (https://github.com/ansible-collections/community.kubernetes/pull/95). + - Use FQCN in module docs and plugin examples (https://github.com/ansible-collections/community.kubernetes/pull/146). + - Use improved kubernetes diffs where possible (https://github.com/ansible-collections/community.kubernetes/pull/105). + - helm - add 'atomic' option (https://github.com/ansible-collections/community.kubernetes/pull/115). + - helm - minor code refactoring (https://github.com/ansible-collections/community.kubernetes/pull/110). + - helm_info and helm_repository - minor code refactor (https://github.com/ansible-collections/community.kubernetes/pull/117). 
+ - k8s - Handle set object retrieved from lookup plugin (https://github.com/ansible-collections/community.kubernetes/pull/118). + fragments: + - 100-k8s_scale-fix-wait.yaml + - 102-dont-require-codecov-check-prs.yaml + - 103-fix-inventory-docs-structure.yaml + - 105-improved-k8s-diffs.yaml + - 107-action-groups-module_defaults.yaml + - 110-helm-minor-refactor.yaml + - 114-minor-docs-fixes.yaml + - 115-helm-add-atomic.yaml + - 117-helm-minor-refactor.yaml + - 118-k8s-lookup-handle-set-object.yaml + - 119-handle-kubeconfig-error.yaml + - 123-flake8.yaml + - 126-requires_ansible-version-constraints.yaml + - 127-remove-action_groups_redirection.yaml + - 130-add-sanity-ignore-211.yaml + - 131-changelog-fragments.yaml + - 139-fix-manifest-ends-with-separator.yml + - 146-fqcn-in-docs.yaml + - 35-wait-conditions.yaml + - 84-check_mode-service-change.yaml + - 94-openshift-apply-test.yaml + - 95-remove-ANSIBLE_METADATA.yaml + - 97-remove-k8s_facts-deprecation.yaml + - 98-add-version_added.yaml + release_date: '2020-07-01' + 0.9.0: + changes: + major_changes: + - k8s - Inventory source migrated from Ansible 2.9 to Kubernetes collection. + - k8s - Lookup plugin migrated from Ansible 2.9 to Kubernetes collection. + - k8s - Module migrated from Ansible 2.9 to Kubernetes collection. + - k8s_auth - Module migrated from Ansible 2.9 to Kubernetes collection. + - k8s_config_resource_name - Filter plugin migrated from Ansible 2.9 to Kubernetes + collection. + - k8s_info - Module migrated from Ansible 2.9 to Kubernetes collection. + - k8s_scale - Module migrated from Ansible 2.9 to Kubernetes collection. + - k8s_service - Module migrated from Ansible 2.9 to Kubernetes collection. + - kubectl - Connection plugin migrated from Ansible 2.9 to Kubernetes collection. + - openshift - Inventory source migrated from Ansible 2.9 to Kubernetes collection. 
+ fragments: + - 4-k8s-prepare-collection-for-release.yaml + release_date: '2020-02-05' + 1.0.0: + changes: + bugfixes: + - Test against stable ansible branch so molecule tests work (https://github.com/ansible-collections/community.kubernetes/pull/168). + - Update openshift requirements in k8s module doc (https://github.com/ansible-collections/community.kubernetes/pull/153). + major_changes: + - helm_plugin - new module to manage Helm plugins (https://github.com/ansible-collections/community.kubernetes/pull/154). + - helm_plugin_info - new modules to gather information about Helm plugins (https://github.com/ansible-collections/community.kubernetes/pull/154). + - k8s_exec - Return rc for the command executed (https://github.com/ansible-collections/community.kubernetes/pull/158). + minor_changes: + - Ensure check mode results are as expected (https://github.com/ansible-collections/community.kubernetes/pull/155). + - Update base branch to 'main' (https://github.com/ansible-collections/community.kubernetes/issues/148). + - helm - Add support for K8S_AUTH_CONTEXT, K8S_AUTH_KUBECONFIG env (https://github.com/ansible-collections/community.kubernetes/pull/141). + - helm - Allow creating namespaces with Helm (https://github.com/ansible-collections/community.kubernetes/pull/157). + - helm - add aliases context for kube_context (https://github.com/ansible-collections/community.kubernetes/pull/152). + - helm - add support for K8S_AUTH_KUBECONFIG and K8S_AUTH_CONTEXT environment + variable (https://github.com/ansible-collections/community.kubernetes/issues/140). + - helm_info - add aliases context for kube_context (https://github.com/ansible-collections/community.kubernetes/pull/152). + - helm_info - add support for K8S_AUTH_KUBECONFIG and K8S_AUTH_CONTEXT environment + variable (https://github.com/ansible-collections/community.kubernetes/issues/140). + - k8s_exec - return RC for the command executed (https://github.com/ansible-collections/community.kubernetes/issues/122). 
+ - k8s_info - Update example using vars (https://github.com/ansible-collections/community.kubernetes/pull/156). + security_fixes: + - kubectl - connection plugin now redact kubectl_token and kubectl_password + in console log (https://github.com/ansible-collections/community.kubernetes/issues/65). + - kubectl - redacted token and password from console log (https://github.com/ansible-collections/community.kubernetes/pull/159). + fragments: + - 122_k8s_exec_rc.yml + - 140-kubeconfig-env.yaml + - 141-helm-add-k8s-env-vars.yaml + - 148-update-base-branch-main.yaml + - 152-helm-context-aliases.yml + - 153-update-openshift-requirements.yaml + - 154-helm_plugin-helm_plugin_info-new-modules.yaml + - 155-ensure-check-mode-waits.yaml + - 156-k8s_info-vars-example.yaml + - 157-helm-create-namespace.yaml + - 158-k8s_exec-return-rc.yaml + - 159-kubectl-redact-token-and-password.yaml + - 168-test-stable-ansible.yaml + - 65_kubectl.yml + modules: + - description: Manage Helm plugins + name: helm_plugin + namespace: '' + - description: Gather information about Helm plugins + name: helm_plugin_info + namespace: '' + release_date: '2020-07-28' + 1.1.0: + changes: + bugfixes: + - common - handle exception raised due to DynamicClient (https://github.com/ansible-collections/community.kubernetes/pull/224). + - helm - add replace parameter (https://github.com/ansible-collections/community.kubernetes/issues/106). + - k8s (inventory) - Set the connection plugin and transport separately (https://github.com/ansible-collections/community.kubernetes/pull/208). + - k8s (inventory) - Specify FQCN for k8s inventory plugin to fix use with Ansible + 2.9 (https://github.com/ansible-collections/community.kubernetes/pull/250). + - k8s_info - add wait functionality (https://github.com/ansible-collections/community.kubernetes/issues/18). + major_changes: + - k8s - Add support for template parameter (https://github.com/ansible-collections/community.kubernetes/pull/230). 
+ - k8s_* - Add support for vaulted kubeconfig and src (https://github.com/ansible-collections/community.kubernetes/pull/193). + minor_changes: + - Add Makefile and downstream build script for kubernetes.core (https://github.com/ansible-collections/community.kubernetes/pull/197). + - Add execution environment metadata (https://github.com/ansible-collections/community.kubernetes/pull/211). + - Add probot stale bot configuration to autoclose issues (https://github.com/ansible-collections/community.kubernetes/pull/196). + - Added a contribution guide (https://github.com/ansible-collections/community.kubernetes/pull/192). + - Refactor module_utils (https://github.com/ansible-collections/community.kubernetes/pull/223). + - Replace KubernetesAnsibleModule class with dummy class (https://github.com/ansible-collections/community.kubernetes/pull/227). + - Replace KubernetesRawModule class with K8sAnsibleMixin (https://github.com/ansible-collections/community.kubernetes/pull/231). + - common - Do not mark task as changed when diff is irrelevant (https://github.com/ansible-collections/community.kubernetes/pull/228). + - helm - Add appVersion idempotence check to Helm (https://github.com/ansible-collections/community.kubernetes/pull/246). + - helm - Return status in check mode (https://github.com/ansible-collections/community.kubernetes/pull/192). + - helm - Support for single or multiple values files (https://github.com/ansible-collections/community.kubernetes/pull/93). + - helm_* - Support vaulted kubeconfig (https://github.com/ansible-collections/community.kubernetes/pull/229). + - k8s - SelfSubjectAccessReviews supported when 405 response received (https://github.com/ansible-collections/community.kubernetes/pull/237). + - k8s - add testcase for adding multiple resources using template parameter + (https://github.com/ansible-collections/community.kubernetes/issues/243). 
+ - k8s_info - Add support for wait (https://github.com/ansible-collections/community.kubernetes/pull/235). + - k8s_info - update custom resource example (https://github.com/ansible-collections/community.kubernetes/issues/202). + - kubectl plugin - correct console log (https://github.com/ansible-collections/community.kubernetes/issues/200). + - raw - Handle exception raised by underlying APIs (https://github.com/ansible-collections/community.kubernetes/pull/180). + fragments: + - 106-helm_replace.yml + - 180_raw_handle_exception.yml + - 18_k8s_info_wait.yml + - 191_contributing.yml + - 192_helm-status-check-mode.yml + - 193_vault-kubeconfig-support.yml + - 196_probot-stale-bot.yml + - 197_downstream-makefile.yml + - 200_kubectl_fix.yml + - 202_k8s_info.yml + - 208_set-connection-plugin-transport.yml + - 211_execution-env-meta.yml + - 223_refactor-module_utils.yml + - 224_handle-dynamicclient-exception.yml + - 227_replace-kubernetesansiblemodule-class.yml + - 228_dont-mark-changed-if-diff-irrelevant.yml + - 229_helm-vault-support.yml + - 230_k8s-template-parameter.yml + - 231_k8sansiblemixin-module.yml + - 234_k8s-selfsubjectaccessreviews.yml + - 235_k8s_info-wait-support.yml + - 243_template.yml + - 246_helm-appversion-check.yml + - 252_connection-plugin-fqcn-fix.yml + - 93_helm-multiple-values-files.yml + release_date: '2020-10-08' + 1.1.1: + changes: + bugfixes: + - k8s - Fix sanity test 'compile' failing because of positional args (https://github.com/ansible-collections/community.kubernetes/issues/260). 
+ fragments: + - 260_k8s-positional-args.yml + release_date: '2020-10-09' diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/config.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/config.yaml new file mode 100644 index 00000000..519494a3 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/config.yaml @@ -0,0 +1,30 @@ +--- +changelog_filename_template: ../CHANGELOG.rst +changelog_filename_version_depth: 0 +changes_file: changelog.yaml +changes_format: combined +keep_fragments: true +mention_ancestor: true +new_plugins_after_name: removed_features +notesdir: fragments +prelude_section_name: release_summary +prelude_section_title: Release Summary +sections: +- - major_changes + - Major Changes +- - minor_changes + - Minor Changes +- - breaking_changes + - Breaking Changes / Porting Guide +- - deprecated_features + - Deprecated Features +- - removed_features + - Removed Features (previously deprecated) +- - security_fixes + - Security Fixes +- - bugfixes + - Bugfixes +- - known_issues + - Known Issues +title: Kubernetes Collection +trivial_section_name: trivial diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/100-k8s_scale-fix-wait.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/100-k8s_scale-fix-wait.yaml new file mode 100644 index 00000000..9dc860fa --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/100-k8s_scale-fix-wait.yaml @@ -0,0 +1,3 @@ +--- +bugfixes: + - k8s_scale - Fix scale wait and add tests (https://github.com/ansible-collections/community.kubernetes/pull/100). 
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/102-dont-require-codecov-check-prs.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/102-dont-require-codecov-check-prs.yaml new file mode 100644 index 00000000..829caf09 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/102-dont-require-codecov-check-prs.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Don't require project coverage check on PRs (https://github.com/ansible-collections/community.kubernetes/pull/102). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/103-fix-inventory-docs-structure.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/103-fix-inventory-docs-structure.yaml new file mode 100644 index 00000000..d26d99c1 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/103-fix-inventory-docs-structure.yaml @@ -0,0 +1,3 @@ +--- +bugfixes: + - Fix suboption docs structure for inventory plugins (https://github.com/ansible-collections/community.kubernetes/pull/103). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/105-improved-k8s-diffs.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/105-improved-k8s-diffs.yaml new file mode 100644 index 00000000..5e1f74ea --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/105-improved-k8s-diffs.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Use improved kubernetes diffs where possible (https://github.com/ansible-collections/community.kubernetes/pull/105). 
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/106-helm_replace.yml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/106-helm_replace.yml new file mode 100644 index 00000000..6fd7b654 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/106-helm_replace.yml @@ -0,0 +1,2 @@ +bugfixes: +- helm - add replace parameter (https://github.com/ansible-collections/community.kubernetes/issues/106). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/107-action-groups-module_defaults.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/107-action-groups-module_defaults.yaml new file mode 100644 index 00000000..06672d98 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/107-action-groups-module_defaults.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Add action groups for playbooks with module_defaults (https://github.com/ansible-collections/community.kubernetes/pull/107). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/110-helm-minor-refactor.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/110-helm-minor-refactor.yaml new file mode 100644 index 00000000..4e929176 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/110-helm-minor-refactor.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - helm - minor code refactoring (https://github.com/ansible-collections/community.kubernetes/pull/110). 
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/114-minor-docs-fixes.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/114-minor-docs-fixes.yaml new file mode 100644 index 00000000..106841a9 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/114-minor-docs-fixes.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Minor documentation fixes and use of FQCN in some examples (https://github.com/ansible-collections/community.kubernetes/pull/114). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/115-helm-add-atomic.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/115-helm-add-atomic.yaml new file mode 100644 index 00000000..68f5bab3 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/115-helm-add-atomic.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - helm - add 'atomic' option (https://github.com/ansible-collections/community.kubernetes/pull/115). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/117-helm-minor-refactor.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/117-helm-minor-refactor.yaml new file mode 100644 index 00000000..391932fe --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/117-helm-minor-refactor.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - helm_info and helm_repository - minor code refactor (https://github.com/ansible-collections/community.kubernetes/pull/117). 
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/118-k8s-lookup-handle-set-object.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/118-k8s-lookup-handle-set-object.yaml new file mode 100644 index 00000000..6580f873 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/118-k8s-lookup-handle-set-object.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - k8s - Handle set object retrieved from lookup plugin (https://github.com/ansible-collections/community.kubernetes/pull/118). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/119-handle-kubeconfig-error.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/119-handle-kubeconfig-error.yaml new file mode 100644 index 00000000..97c2201f --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/119-handle-kubeconfig-error.yaml @@ -0,0 +1,3 @@ +--- +bugfixes: + - Handle invalid kubeconfig parsing error (https://github.com/ansible-collections/community.kubernetes/pull/119). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/122_k8s_exec_rc.yml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/122_k8s_exec_rc.yml new file mode 100644 index 00000000..91d09b67 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/122_k8s_exec_rc.yml @@ -0,0 +1,2 @@ +minor_changes: +- k8s_exec - return RC for the command executed (https://github.com/ansible-collections/community.kubernetes/issues/122). 
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/123-flake8.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/123-flake8.yaml new file mode 100644 index 00000000..3f9abc20 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/123-flake8.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Check Python code using flake8 (https://github.com/ansible-collections/community.kubernetes/pull/123). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/126-requires_ansible-version-constraints.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/126-requires_ansible-version-constraints.yaml new file mode 100644 index 00000000..3ff31dfa --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/126-requires_ansible-version-constraints.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Add requires_ansible version constraints to runtime.yml (https://github.com/ansible-collections/community.kubernetes/pull/126). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/127-remove-action_groups_redirection.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/127-remove-action_groups_redirection.yaml new file mode 100644 index 00000000..1827457c --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/127-remove-action_groups_redirection.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Remove action_groups_redirection entry from meta/runtime.yml (https://github.com/ansible-collections/community.kubernetes/pull/127). 
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/13-fix-elements-argspec.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/13-fix-elements-argspec.yaml new file mode 100644 index 00000000..a9e7ef89 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/13-fix-elements-argspec.yaml @@ -0,0 +1,3 @@ +--- +bugfixes: + - k8s - Fix argspec for 'elements' (https://github.com/ansible-collections/community.kubernetes/issues/13). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/130-add-sanity-ignore-211.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/130-add-sanity-ignore-211.yaml new file mode 100644 index 00000000..ad7af68a --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/130-add-sanity-ignore-211.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Add sanity test ignore file for Ansible 2.11 (https://github.com/ansible-collections/community.kubernetes/pull/130). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/131-changelog-fragments.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/131-changelog-fragments.yaml new file mode 100644 index 00000000..3b3d0397 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/131-changelog-fragments.yaml @@ -0,0 +1,3 @@ +--- +major_changes: + - Add changelog and fragments and document changelog process (https://github.com/ansible-collections/community.kubernetes/pull/131). 
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/139-fix-manifest-ends-with-separator.yml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/139-fix-manifest-ends-with-separator.yml new file mode 100644 index 00000000..2742241d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/139-fix-manifest-ends-with-separator.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - raw - handle condition when definition is none (https://github.com/ansible-collections/community.kubernetes/pull/139). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/14-k8s_exec-new-module.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/14-k8s_exec-new-module.yaml new file mode 100644 index 00000000..40594b80 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/14-k8s_exec-new-module.yaml @@ -0,0 +1,3 @@ +--- +major_changes: + - k8s_exec - New module for executing commands on pods via Kubernetes API (https://github.com/ansible-collections/community.kubernetes/pull/14). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/140-kubeconfig-env.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/140-kubeconfig-env.yaml new file mode 100644 index 00000000..0d00dd7b --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/140-kubeconfig-env.yaml @@ -0,0 +1,4 @@ +--- +minor_changes: + - helm - add support for K8S_AUTH_KUBECONFIG and K8S_AUTH_CONTEXT environment variable (https://github.com/ansible-collections/community.kubernetes/issues/140). 
+ - helm_info - add support for K8S_AUTH_KUBECONFIG and K8S_AUTH_CONTEXT environment variable (https://github.com/ansible-collections/community.kubernetes/issues/140). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/141-helm-add-k8s-env-vars.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/141-helm-add-k8s-env-vars.yaml new file mode 100644 index 00000000..e6518d49 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/141-helm-add-k8s-env-vars.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - helm - Add support for K8S_AUTH_CONTEXT, K8S_AUTH_KUBECONFIG env (https://github.com/ansible-collections/community.kubernetes/pull/141). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/146-fqcn-in-docs.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/146-fqcn-in-docs.yaml new file mode 100644 index 00000000..0ff70860 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/146-fqcn-in-docs.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Use FQCN in module docs and plugin examples (https://github.com/ansible-collections/community.kubernetes/pull/146). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/148-update-base-branch-main.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/148-update-base-branch-main.yaml new file mode 100644 index 00000000..268200c5 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/148-update-base-branch-main.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Update base branch to 'main' (https://github.com/ansible-collections/community.kubernetes/issues/148). 
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/152-helm-context-aliases.yml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/152-helm-context-aliases.yml new file mode 100644 index 00000000..4a3d0876 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/152-helm-context-aliases.yml @@ -0,0 +1,4 @@ +--- +minor_changes: + - helm - add aliases context for kube_context (https://github.com/ansible-collections/community.kubernetes/pull/152). + - helm_info - add aliases context for kube_context (https://github.com/ansible-collections/community.kubernetes/pull/152). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/153-update-openshift-requirements.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/153-update-openshift-requirements.yaml new file mode 100644 index 00000000..502449b0 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/153-update-openshift-requirements.yaml @@ -0,0 +1,3 @@ +--- +bugfixes: + - Update openshift requirements in k8s module doc (https://github.com/ansible-collections/community.kubernetes/pull/153). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/154-helm_plugin-helm_plugin_info-new-modules.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/154-helm_plugin-helm_plugin_info-new-modules.yaml new file mode 100644 index 00000000..33a550f8 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/154-helm_plugin-helm_plugin_info-new-modules.yaml @@ -0,0 +1,4 @@ +--- +major_changes: + - helm_plugin - new module to manage Helm plugins (https://github.com/ansible-collections/community.kubernetes/pull/154). 
+ - helm_plugin_info - new modules to gather information about Helm plugins (https://github.com/ansible-collections/community.kubernetes/pull/154). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/155-ensure-check-mode-waits.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/155-ensure-check-mode-waits.yaml new file mode 100644 index 00000000..5a68c343 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/155-ensure-check-mode-waits.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Ensure check mode results are as expected (https://github.com/ansible-collections/community.kubernetes/pull/155). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/156-k8s_info-vars-example.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/156-k8s_info-vars-example.yaml new file mode 100644 index 00000000..0ebe4318 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/156-k8s_info-vars-example.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - k8s_info - Update example using vars (https://github.com/ansible-collections/community.kubernetes/pull/156). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/157-helm-create-namespace.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/157-helm-create-namespace.yaml new file mode 100644 index 00000000..8e3ace60 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/157-helm-create-namespace.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - helm - Allow creating namespaces with Helm (https://github.com/ansible-collections/community.kubernetes/pull/157). 
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/158-k8s_exec-return-rc.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/158-k8s_exec-return-rc.yaml new file mode 100644 index 00000000..f8a10fb7 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/158-k8s_exec-return-rc.yaml @@ -0,0 +1,3 @@ +--- +major_changes: + - k8s_exec - Return rc for the command executed (https://github.com/ansible-collections/community.kubernetes/pull/158). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/159-kubectl-redact-token-and-password.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/159-kubectl-redact-token-and-password.yaml new file mode 100644 index 00000000..b58ffd97 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/159-kubectl-redact-token-and-password.yaml @@ -0,0 +1,3 @@ +--- +security_fixes: + - kubectl - redacted token and password from console log (https://github.com/ansible-collections/community.kubernetes/pull/159). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/16-k8s_log-new-module.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/16-k8s_log-new-module.yaml new file mode 100644 index 00000000..e6ae7335 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/16-k8s_log-new-module.yaml @@ -0,0 +1,3 @@ +--- +major_changes: + - k8s_log - New module for retrieving pod logs (https://github.com/ansible-collections/community.kubernetes/pull/16). 
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/168-test-stable-ansible.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/168-test-stable-ansible.yaml new file mode 100644 index 00000000..d05a4b0a --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/168-test-stable-ansible.yaml @@ -0,0 +1,3 @@ +--- +bugfixes: + - Test against stable ansible branch so molecule tests work (https://github.com/ansible-collections/community.kubernetes/pull/168). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/180_raw_handle_exception.yml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/180_raw_handle_exception.yml new file mode 100644 index 00000000..6fb7d194 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/180_raw_handle_exception.yml @@ -0,0 +1,3 @@ +--- +minor_changes: + - raw - Handle exception raised by underlying APIs (https://github.com/ansible-collections/community.kubernetes/pull/180). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/18_k8s_info_wait.yml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/18_k8s_info_wait.yml new file mode 100644 index 00000000..157eb97c --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/18_k8s_info_wait.yml @@ -0,0 +1,2 @@ +bugfixes: +- k8s_info - add wait functionality (https://github.com/ansible-collections/community.kubernetes/issues/18). 
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/191_contributing.yml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/191_contributing.yml new file mode 100644 index 00000000..acb171d3 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/191_contributing.yml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Added a contribution guide (https://github.com/ansible-collections/community.kubernetes/pull/192). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/192_helm-status-check-mode.yml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/192_helm-status-check-mode.yml new file mode 100644 index 00000000..96e403ae --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/192_helm-status-check-mode.yml @@ -0,0 +1,3 @@ +--- +minor_changes: + - helm - Return status in check mode (https://github.com/ansible-collections/community.kubernetes/pull/192). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/193_vault-kubeconfig-support.yml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/193_vault-kubeconfig-support.yml new file mode 100644 index 00000000..8843d1c0 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/193_vault-kubeconfig-support.yml @@ -0,0 +1,3 @@ +--- +major_changes: + - k8s_* - Add support for vaulted kubeconfig and src (https://github.com/ansible-collections/community.kubernetes/pull/193). 
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/196_probot-stale-bot.yml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/196_probot-stale-bot.yml new file mode 100644 index 00000000..58808f46 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/196_probot-stale-bot.yml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Add probot stale bot configuration to autoclose issues (https://github.com/ansible-collections/community.kubernetes/pull/196). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/197_downstream-makefile.yml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/197_downstream-makefile.yml new file mode 100644 index 00000000..9470fceb --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/197_downstream-makefile.yml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Add Makefile and downstream build script for kubernetes.core (https://github.com/ansible-collections/community.kubernetes/pull/197). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/200_kubectl_fix.yml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/200_kubectl_fix.yml new file mode 100644 index 00000000..91430dc1 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/200_kubectl_fix.yml @@ -0,0 +1,2 @@ +minor_changes: +- kubectl plugin - correct console log (https://github.com/ansible-collections/community.kubernetes/issues/200). 
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/202_k8s_info.yml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/202_k8s_info.yml new file mode 100644 index 00000000..78f26256 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/202_k8s_info.yml @@ -0,0 +1,2 @@ +minor_changes: +- k8s_info - update custom resource example (https://github.com/ansible-collections/community.kubernetes/issues/202). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/208_set-connection-plugin-transport.yml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/208_set-connection-plugin-transport.yml new file mode 100644 index 00000000..56f17b86 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/208_set-connection-plugin-transport.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - k8s (inventory) - Set the connection plugin and transport separately (https://github.com/ansible-collections/community.kubernetes/pull/208). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/211_execution-env-meta.yml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/211_execution-env-meta.yml new file mode 100644 index 00000000..996677ae --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/211_execution-env-meta.yml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Add execution environment metadata (https://github.com/ansible-collections/community.kubernetes/pull/211). 
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/223_refactor-module_utils.yml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/223_refactor-module_utils.yml new file mode 100644 index 00000000..3b8abc58 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/223_refactor-module_utils.yml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Refactor module_utils (https://github.com/ansible-collections/community.kubernetes/pull/223). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/224_handle-dynamicclient-exception.yml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/224_handle-dynamicclient-exception.yml new file mode 100644 index 00000000..305bb9d5 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/224_handle-dynamicclient-exception.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - common - handle exception raised due to DynamicClient (https://github.com/ansible-collections/community.kubernetes/pull/224). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/227_replace-kubernetesansiblemodule-class.yml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/227_replace-kubernetesansiblemodule-class.yml new file mode 100644 index 00000000..f492e7b9 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/227_replace-kubernetesansiblemodule-class.yml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Replace KubernetesAnsibleModule class with dummy class (https://github.com/ansible-collections/community.kubernetes/pull/227). 
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/228_dont-mark-changed-if-diff-irrelevant.yml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/228_dont-mark-changed-if-diff-irrelevant.yml new file mode 100644 index 00000000..515a4be7 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/228_dont-mark-changed-if-diff-irrelevant.yml @@ -0,0 +1,3 @@ +--- +minor_changes: + - common - Do not mark task as changed when diff is irrelevant (https://github.com/ansible-collections/community.kubernetes/pull/228). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/229_helm-vault-support.yml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/229_helm-vault-support.yml new file mode 100644 index 00000000..4400f3a1 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/229_helm-vault-support.yml @@ -0,0 +1,3 @@ +--- +minor_changes: + - helm_* - Support vaulted kubeconfig (https://github.com/ansible-collections/community.kubernetes/pull/229). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/230_k8s-template-parameter.yml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/230_k8s-template-parameter.yml new file mode 100644 index 00000000..c7f48f2c --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/230_k8s-template-parameter.yml @@ -0,0 +1,3 @@ +--- +major_changes: + - k8s - Add support for template parameter (https://github.com/ansible-collections/community.kubernetes/pull/230). 
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/231_k8sansiblemixin-module.yml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/231_k8sansiblemixin-module.yml new file mode 100644 index 00000000..01700f01 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/231_k8sansiblemixin-module.yml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Replace KubernetesRawModule class with K8sAnsibleMixin (https://github.com/ansible-collections/community.kubernetes/pull/231). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/234_k8s-selfsubjectaccessreviews.yml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/234_k8s-selfsubjectaccessreviews.yml new file mode 100644 index 00000000..91454fab --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/234_k8s-selfsubjectaccessreviews.yml @@ -0,0 +1,3 @@ +--- +minor_changes: + - k8s - SelfSubjectAccessReviews supported when 405 response received (https://github.com/ansible-collections/community.kubernetes/pull/237). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/235_k8s_info-wait-support.yml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/235_k8s_info-wait-support.yml new file mode 100644 index 00000000..ec60040c --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/235_k8s_info-wait-support.yml @@ -0,0 +1,3 @@ +--- +minor_changes: + - k8s_info - Add support for wait (https://github.com/ansible-collections/community.kubernetes/pull/235). 
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/243_template.yml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/243_template.yml new file mode 100644 index 00000000..9a1630eb --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/243_template.yml @@ -0,0 +1,2 @@ +minor_changes: +- k8s - add testcase for adding multiple resources using template parameter (https://github.com/ansible-collections/community.kubernetes/issues/243). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/246_helm-appversion-check.yml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/246_helm-appversion-check.yml new file mode 100644 index 00000000..7f87f20d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/246_helm-appversion-check.yml @@ -0,0 +1,3 @@ +--- +minor_changes: + - helm - Add appVersion idempotence check to Helm (https://github.com/ansible-collections/community.kubernetes/pull/246). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/252_connection-plugin-fqcn-fix.yml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/252_connection-plugin-fqcn-fix.yml new file mode 100644 index 00000000..fd4a1419 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/252_connection-plugin-fqcn-fix.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - k8s (inventory) - Specify FQCN for k8s inventory plugin to fix use with Ansible 2.9 (https://github.com/ansible-collections/community.kubernetes/pull/250). 
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/260_k8s-positional-args.yml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/260_k8s-positional-args.yml new file mode 100644 index 00000000..18e18d75 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/260_k8s-positional-args.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - k8s - Fix sanity test 'compile' failing because of positional args (https://github.com/ansible-collections/community.kubernetes/issues/260). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/33-k8s_service-fix-argspec.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/33-k8s_service-fix-argspec.yaml new file mode 100644 index 00000000..91daa0d6 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/33-k8s_service-fix-argspec.yaml @@ -0,0 +1,3 @@ +--- +bugfixes: + - k8s_service - Fix argspec (https://github.com/ansible-collections/community.kubernetes/issues/33). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/35-wait-conditions.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/35-wait-conditions.yaml new file mode 100644 index 00000000..2f6dd67c --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/35-wait-conditions.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Improve k8s Deployment and Daemonset wait conditions (https://github.com/ansible-collections/community.kubernetes/pull/35). 
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/4-k8s-prepare-collection-for-release.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/4-k8s-prepare-collection-for-release.yaml new file mode 100644 index 00000000..4f9a7bcd --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/4-k8s-prepare-collection-for-release.yaml @@ -0,0 +1,12 @@ +--- +major_changes: + - k8s - Inventory source migrated from Ansible 2.9 to Kubernetes collection. + - k8s - Module migrated from Ansible 2.9 to Kubernetes collection. + - k8s - Lookup plugin migrated from Ansible 2.9 to Kubernetes collection. + - k8s_auth - Module migrated from Ansible 2.9 to Kubernetes collection. + - k8s_info - Module migrated from Ansible 2.9 to Kubernetes collection. + - k8s_scale - Module migrated from Ansible 2.9 to Kubernetes collection. + - k8s_service - Module migrated from Ansible 2.9 to Kubernetes collection. + - k8s_config_resource_name - Filter plugin migrated from Ansible 2.9 to Kubernetes collection. + - kubectl - Connection plugin migrated from Ansible 2.9 to Kubernetes collection. + - openshift - Inventory source migrated from Ansible 2.9 to Kubernetes collection. diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/49-k8s-add-persist_config-option.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/49-k8s-add-persist_config-option.yaml new file mode 100644 index 00000000..ef4c3f4d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/49-k8s-add-persist_config-option.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - k8s - Added ``persist_config`` option for persisting refreshed tokens (https://github.com/ansible-collections/community.kubernetes/issues/49). 
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/51-kubectl-security-disclosure.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/51-kubectl-security-disclosure.yaml new file mode 100644 index 00000000..0d2fd915 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/51-kubectl-security-disclosure.yaml @@ -0,0 +1,3 @@ +--- +security_fixes: + - kubectl - Warn about information disclosure when using options like ``kubectl_password``, ``kubectl_extra_args``, and ``kubectl_token`` to pass data through to the command line using the ``kubectl`` connection plugin (https://github.com/ansible-collections/community.kubernetes/pull/51). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/52-kubectl-connection-docsfix.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/52-kubectl-connection-docsfix.yaml new file mode 100644 index 00000000..ff71021c --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/52-kubectl-connection-docsfix.yaml @@ -0,0 +1,3 @@ +--- +bugfixes: + - kubectl - Fix documentation in kubectl connection plugin (https://github.com/ansible-collections/community.kubernetes/pull/52). 
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/54-k8s-add-exception-handling.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/54-k8s-add-exception-handling.yaml new file mode 100644 index 00000000..73fd6c54 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/54-k8s-add-exception-handling.yaml @@ -0,0 +1,3 @@ +--- +bugfixes: + - k8s - Add exception handling when retrieving k8s client (https://github.com/ansible-collections/community.kubernetes/pull/54). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/56-k8s-from_yaml-docs-examples.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/56-k8s-from_yaml-docs-examples.yaml new file mode 100644 index 00000000..399b1ef8 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/56-k8s-from_yaml-docs-examples.yaml @@ -0,0 +1,3 @@ +--- +bugfixes: + - k8s - Use ``from_yaml`` filter with lookup examples in ``k8s`` module documentation examples (https://github.com/ansible-collections/community.kubernetes/pull/56). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/61-helm-new-modules.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/61-helm-new-modules.yaml new file mode 100644 index 00000000..d741500d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/61-helm-new-modules.yaml @@ -0,0 +1,5 @@ +--- +major_changes: + - helm - New module for managing Helm charts (https://github.com/ansible-collections/community.kubernetes/pull/61). + - helm_info - New module for retrieving Helm chart information (https://github.com/ansible-collections/community.kubernetes/pull/61). 
+ - helm_repository - New module for managing Helm repositories (https://github.com/ansible-collections/community.kubernetes/pull/61). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/65_kubectl.yml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/65_kubectl.yml new file mode 100644 index 00000000..0c30a61b --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/65_kubectl.yml @@ -0,0 +1,2 @@ +security_fixes: +- kubectl - connection plugin now redacts kubectl_token and kubectl_password in console log (https://github.com/ansible-collections/community.kubernetes/issues/65). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/69-k8s_log-dont-parse-as-json.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/69-k8s_log-dont-parse-as-json.yaml new file mode 100644 index 00000000..bf34a741 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/69-k8s_log-dont-parse-as-json.yaml @@ -0,0 +1,3 @@ +--- +bugfixes: + - k8s_log - Module no longer attempts to parse log as JSON (https://github.com/ansible-collections/community.kubernetes/pull/69). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/78-github-actions-workflow.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/78-github-actions-workflow.yaml new file mode 100644 index 00000000..7985746e --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/78-github-actions-workflow.yaml @@ -0,0 +1,3 @@ +--- +bugfixes: + - Update GitHub Actions workflow for better CI stability (https://github.com/ansible-collections/community.kubernetes/pull/78). 
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/81-rename-repository.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/81-rename-repository.yaml new file mode 100644 index 00000000..dd77b24e --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/81-rename-repository.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Rename repository to ``community.kubernetes`` (https://github.com/ansible-collections/community.kubernetes/pull/81). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/84-check_mode-service-change.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/84-check_mode-service-change.yaml new file mode 100644 index 00000000..974b2ace --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/84-check_mode-service-change.yaml @@ -0,0 +1,3 @@ +--- +bugfixes: + - Make sure Service changes run correctly in check_mode (https://github.com/ansible-collections/community.kubernetes/pull/84). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/85-exclude-unnecessary-files-when-building.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/85-exclude-unnecessary-files-when-building.yaml new file mode 100644 index 00000000..f7f4be75 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/85-exclude-unnecessary-files-when-building.yaml @@ -0,0 +1,3 @@ +--- +bugfixes: + - Make sure extra files are not included in built collection (https://github.com/ansible-collections/community.kubernetes/pull/85). 
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/93_helm-multiple-values-files.yml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/93_helm-multiple-values-files.yml new file mode 100644 index 00000000..f6db35d6 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/93_helm-multiple-values-files.yml @@ -0,0 +1,3 @@ +--- +minor_changes: + - helm - Support for single or multiple values files (https://github.com/ansible-collections/community.kubernetes/pull/93). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/94-openshift-apply-test.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/94-openshift-apply-test.yaml new file mode 100644 index 00000000..e5a53376 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/94-openshift-apply-test.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Add test for openshift apply bug (https://github.com/ansible-collections/community.kubernetes/pull/94). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/95-remove-ANSIBLE_METADATA.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/95-remove-ANSIBLE_METADATA.yaml new file mode 100644 index 00000000..e0370031 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/95-remove-ANSIBLE_METADATA.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Remove deprecated ANSIBLE_METADATA field (https://github.com/ansible-collections/community.kubernetes/pull/95). 
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/97-remove-k8s_facts-deprecation.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/97-remove-k8s_facts-deprecation.yaml new file mode 100644 index 00000000..5fa7abcb --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/97-remove-k8s_facts-deprecation.yaml @@ -0,0 +1,3 @@ +--- +bugfixes: + - k8s_info - remove unnecessary k8s_facts deprecation notice (https://github.com/ansible-collections/community.kubernetes/pull/97). diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/98-add-version_added.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/98-add-version_added.yaml new file mode 100644 index 00000000..1c82b0e9 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/changelogs/fragments/98-add-version_added.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Add version_added to each new collection module (https://github.com/ansible-collections/community.kubernetes/pull/98). 
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/codecov.yml b/collections-debian-merged/ansible_collections/community/kubernetes/codecov.yml new file mode 100644 index 00000000..71e957c6 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/codecov.yml @@ -0,0 +1,8 @@ +--- +coverage: + precision: 2 + round: down + range: "70...100" + status: + project: + default: false diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/meta/runtime.yml b/collections-debian-merged/ansible_collections/community/kubernetes/meta/runtime.yml new file mode 100644 index 00000000..0e5e3690 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/meta/runtime.yml @@ -0,0 +1,43 @@ +--- +requires_ansible: '>=2.9' + +action_groups: + helm: + - helm + - helm_info + - helm_repository + k8s: + - k8s + - k8s_auth + - k8s_exec + - k8s_facts + - k8s_info + - k8s_log + - k8s_scale + - k8s_service + +plugin_routing: + modules: + # k8s_facts was originally slated for removal in Ansible 2.13. + k8s_facts: + redirect: community.kubernetes.k8s_info + deprecation: + removal_version: 2.0.0 + warning_text: Use community.kubernetes.k8s_info instead. + k8s_raw: + tombstone: + removal_version: 0.1.0 + warning_text: The k8s_raw module was slated for deprecation in Ansible 2.10 and has been removed. Use community.kubernetes.k8s instead. + openshift_raw: + tombstone: + removal_version: 0.1.0 + warning_text: The openshift_raw module was slated for deprecation in Ansible 2.10 and has been removed. Use community.kubernetes.k8s instead. + openshift_scale: + tombstone: + removal_version: 0.1.0 + warning_text: The openshift_scale module was slated for deprecation in Ansible 2.10 and has been removed. Use community.kubernetes.k8s_scale instead. 
+ lookup: + openshift: + tombstone: + removal_version: 0.1.0 + warning_text: The openshift lookup plugin was slated for deprecation in Ansible 2.10 and has been removed. Use community.kubernetes.k8s instead. diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/converge.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/converge.yml new file mode 100644 index 00000000..f6dcb454 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/converge.yml @@ -0,0 +1,119 @@ +--- +- name: Converge + hosts: localhost + connection: local + + collections: + - community.kubernetes + + vars_files: + - vars/main.yml + + tasks: + - name: Verify cluster is working. + k8s_info: + namespace: kube-system + kind: Pod + register: pod_list + + - name: Verify cluster has more than 5 pods running. + assert: + that: (pod_list.resources | count) > 5 + + - include_tasks: tasks/delete.yml + - include_tasks: tasks/scale.yml + - include_tasks: tasks/apply.yml + - include_tasks: tasks/waiter.yml + - include_tasks: tasks/full.yml + - include_tasks: tasks/exec.yml + - include_tasks: tasks/log.yml + - include_tasks: tasks/cluster_info.yml + - include_tasks: tasks/access_review.yml + - include_tasks: tasks/rollback.yml + + roles: + - helm + + post_tasks: + - name: Ensure namespace exists + k8s: + api_version: v1 + kind: Namespace + name: inventory + + - name: Add a deployment + k8s: + definition: + apiVersion: apps/v1 + kind: Deployment + metadata: + name: inventory + namespace: inventory + spec: + replicas: 1 + selector: + matchLabels: + app: "{{ k8s_pod_name }}" + template: "{{ k8s_pod_template }}" + wait: yes + wait_timeout: 120 + vars: + k8s_pod_name: inventory + k8s_pod_image: python + k8s_pod_command: + - python + - '-m' + - http.server + k8s_pod_env: + - name: TEST + value: test + + - meta: refresh_inventory + +- name: Verify inventory and connection plugins + hosts: 
namespace_inventory_pods + gather_facts: no + + vars: + file_content: | + Hello world + + tasks: + - name: End play if host not running (TODO should we not add these to the inventory?) + meta: end_host + when: pod_phase != "Running" + + - debug: var=hostvars + - setup: + + - debug: var=ansible_facts + + - name: Assert the TEST environment variable was retrieved + assert: + that: ansible_facts.env.TEST == 'test' + + - name: Copy a file into the host + copy: + content: '{{ file_content }}' + dest: /tmp/test_file + + - name: Retrieve the file from the host + slurp: + src: /tmp/test_file + register: slurped_file + + - name: Assert the file content matches expectations + assert: + that: (slurped_file.content|b64decode) == file_content + +- name: Delete inventory namespace + hosts: localhost + connection: local + gather_facts: no + tasks: + - name: Remove inventory namespace + k8s: + api_version: v1 + kind: Namespace + name: inventory + state: absent diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/molecule.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/molecule.yml new file mode 100644 index 00000000..693cd351 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/molecule.yml @@ -0,0 +1,38 @@ +--- +driver: + name: delegated + options: + managed: false + login_cmd_template: 'docker exec -ti {instance} bash' + ansible_connection_options: + ansible_connection: docker +lint: | + set -e + yamllint . 
+ flake8 +platforms: + - name: instance-kind +provisioner: + name: ansible + log: true + config_options: + inventory: + enable_plugins: community.kubernetes.k8s + lint: {} + inventory: + hosts: + plugin: community.kubernetes.k8s + host_vars: + localhost: + ansible_python_interpreter: '{{ ansible_playbook_python }}' + env: + ANSIBLE_FORCE_COLOR: 'true' + options: + vvv: True +scenario: + name: default + test_sequence: + - lint + - syntax + - converge + - verify diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/defaults/main.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/defaults/main.yml new file mode 100644 index 00000000..b5a2a31f --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/defaults/main.yml @@ -0,0 +1,15 @@ +--- +helm_archive_name: "helm-{{ helm_version }}-{{ ansible_system | lower }}-amd64.tar.gz" +helm_binary: "/tmp/helm/{{ ansible_system | lower }}-amd64/helm" +helm_namespace: helm + +tiller_namespace: tiller +tiller_cluster_role: cluster-admin + +chart_test: "nginx-ingress" +chart_test_version: 1.32.0 +chart_test_version_upgrade: 1.33.0 +chart_test_repo: "https://kubernetes-charts.storage.googleapis.com" +chart_test_git_repo: "http://github.com/helm/charts.git" +chart_test_values: + revisionHistoryLimit: 0 diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/files/appversionless-chart/Chart.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/files/appversionless-chart/Chart.yaml new file mode 100644 index 00000000..c308a00a --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/files/appversionless-chart/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: appversionless-chart +description: A chart used in molecule tests 
+type: application +version: 0.1.0 diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/files/test-chart/Chart.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/files/test-chart/Chart.yaml new file mode 100644 index 00000000..5d09a08c --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/files/test-chart/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: test-chart +description: A chart used in molecule tests +type: application +version: 0.1.0 +appVersion: "default" diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/files/values.yaml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/files/values.yaml new file mode 100644 index 00000000..7b057068 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/files/values.yaml @@ -0,0 +1,2 @@ +--- +revisionHistoryLimit: 0 diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/install.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/install.yml new file mode 100644 index 00000000..8030aac7 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/install.yml @@ -0,0 +1,11 @@ +--- +- name: Init Helm folders + file: + path: /tmp/helm/ + state: directory + +- name: Unarchive Helm binary + unarchive: + src: 'https://get.helm.sh/{{ helm_archive_name }}' + dest: /tmp/helm/ + remote_src: yes diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/main.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/main.yml new file 
mode 100644 index 00000000..e86d33df --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/main.yml @@ -0,0 +1,7 @@ +--- +- name: Run tests + include_tasks: run_test.yml + loop_control: + loop_var: helm_version + with_items: + - "v3.2.4" diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/run_test.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/run_test.yml new file mode 100644 index 00000000..0384a2e4 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/run_test.yml @@ -0,0 +1,35 @@ +--- +- name: Ensure helm is not installed + file: + path: "{{ item }}" + state: absent + with_items: + - "/tmp/helm" + +- name: Check failed if helm is not installed + include_tasks: test_helm_not_installed.yml + +- name: "Install {{ helm_version }}" + include_tasks: install.yml + +- name: tests_repository + include_tasks: tests_repository.yml + +- name: Deploy charts + include_tasks: "tests_chart/{{ test_chart_type }}.yml" + loop_control: + loop_var: test_chart_type + with_items: + - from_local_path + - from_repository + - from_url + +- name: Test helm plugin + include_tasks: tests_helm_plugin.yml + +- name: Clean helm install + file: + path: "{{ item }}" + state: absent + with_items: + - "/tmp/helm/" diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/test_helm_not_installed.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/test_helm_not_installed.yml new file mode 100644 index 00000000..0832dcb0 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/test_helm_not_installed.yml @@ -0,0 +1,15 @@ +--- +- name: Failed test when helm is not installed + 
helm: + binary_path: "{{ helm_binary}}_fake" + name: test + chart_ref: "{{ chart_test }}" + namespace: "{{ helm_namespace }}" + ignore_errors: yes + register: helm_missing_binary + +- name: Assert that helm is not installed + assert: + that: + - helm_missing_binary is failed + - "'No such file or directory' in helm_missing_binary.msg" diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_chart.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_chart.yml new file mode 100644 index 00000000..bb600f9b --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_chart.yml @@ -0,0 +1,287 @@ +--- +- name: Check helm_info empty + helm_info: + binary_path: "{{ helm_binary }}" + name: test + namespace: "{{ helm_namespace }}" + register: empty_info + +- name: "Assert that no charts are installed with helm_info" + assert: + that: + - empty_info.status is undefined + +- name: "Install fail {{ chart_test }} from {{ source }}" + helm: + binary_path: "{{ helm_binary }}" + name: test + chart_ref: "{{ chart_source }}" + chart_version: "{{ chart_source_version | default(omit) }}" + namespace: "{{ helm_namespace }}" + ignore_errors: yes + register: install_fail + +- name: "Assert that Install fail {{ chart_test }} from {{ source }}" + assert: + that: + - install_fail is failed + - "'Error: create: failed to create: namespaces \"' + helm_namespace + '\" not found' in install_fail.stderr" + +- name: "Install {{ chart_test }} from {{ source }}" + helm: + binary_path: "{{ helm_binary }}" + name: test + chart_ref: "{{ chart_source }}" + chart_version: "{{ chart_source_version | default(omit) }}" + namespace: "{{ helm_namespace }}" + create_namespace: true + register: install + +- name: "Assert that {{ chart_test }} chart is installed from {{ source }}" + assert: + that: + - install is changed + - 
install.status.chart == "{{ chart_test }}-{{ chart_test_version }}" + - install.status.status | lower == 'deployed' + +- name: Check helm_info content + helm_info: + binary_path: "{{ helm_binary }}" + name: test + namespace: "{{ helm_namespace }}" + register: content_info + +- name: "Assert that {{ chart_test }} is installed from {{ source }} with helm_info" + assert: + that: + - content_info.status.chart == "{{ chart_test }}-{{ chart_test_version }}" + - content_info.status.status | lower == 'deployed' + +- name: Check idempotency + helm: + binary_path: "{{ helm_binary }}" + name: test + chart_ref: "{{ chart_source }}" + chart_version: "{{ chart_source_version | default(omit) }}" + namespace: "{{ helm_namespace }}" + register: install + +- name: Assert idempotency + assert: + that: + - install is not changed + - install.status.chart == "{{ chart_test }}-{{ chart_test_version }}" + - install.status.status | lower == 'deployed' + +- name: "Add vars to {{ chart_test }} from {{ source }}" + helm: + binary_path: "{{ helm_binary }}" + name: test + chart_ref: "{{ chart_source }}" + chart_version: "{{ chart_source_version | default(omit) }}" + namespace: "{{ helm_namespace }}" + values: "{{ chart_test_values }}" + register: install + +- name: "Assert that {{ chart_test }} chart is upgraded with new var from {{ source }}" + assert: + that: + - install is changed + - install.status.status | lower == 'deployed' + - install.status.chart == "{{ chart_test }}-{{ chart_test_version }}" + - "install.status['values'].revisionHistoryLimit == 0" + +- name: Check idempotency after adding vars + helm: + binary_path: "{{ helm_binary }}" + name: test + chart_ref: "{{ chart_source }}" + chart_version: "{{ chart_source_version | default(omit) }}" + namespace: "{{ helm_namespace }}" + values: "{{ chart_test_values }}" + register: install + +- name: Assert idempotency after add vars + assert: + that: + - install is not changed + - install.status.status | lower == 'deployed' + - 
install.status.chart == "{{ chart_test }}-{{ chart_test_version }}" + - "install.status['values'].revisionHistoryLimit == 0" + +- name: "Remove Vars to {{ chart_test }} from {{ source }}" + helm: + binary_path: "{{ helm_binary }}" + name: test + chart_ref: "{{ chart_source }}" + chart_version: "{{ chart_source_version | default(omit) }}" + namespace: "{{ helm_namespace }}" + register: install + +- name: "Assert that {{ chart_test }} chart is upgraded with new var from {{ source }}" + assert: + that: + - install is changed + - install.status.status | lower == 'deployed' + - install.status.chart == "{{ chart_test }}-{{ chart_test_version }}" + - install.status['values'] == {} + +- name: Check idempotency after removing vars + helm: + binary_path: "{{ helm_binary }}" + name: test + chart_ref: "{{ chart_source }}" + chart_version: "{{ chart_source_version | default(omit) }}" + namespace: "{{ helm_namespace }}" + register: install + +- name: Assert idempotency after removing vars + assert: + that: + - install is not changed + - install.status.status | lower == 'deployed' + - install.status.chart == "{{ chart_test }}-{{ chart_test_version }}" + - install.status['values'] == {} + +- name: "Upgrade {{ chart_test }} from {{ source }}" + helm: + binary_path: "{{ helm_binary }}" + name: test + chart_ref: "{{ chart_source_upgrade | default(chart_source) }}" + chart_version: "{{ chart_source_version_upgrade | default(omit) }}" + namespace: "{{ helm_namespace }}" + register: install + +- name: "Assert that {{ chart_test }} chart is upgraded with new version from {{ source }}" + assert: + that: + - install is changed + - install.status.status | lower == 'deployed' + - install.status.chart == "{{ chart_test }}-{{ chart_test_version_upgrade }}" + +- name: Check idempotency after upgrade + helm: + binary_path: "{{ helm_binary }}" + name: test + chart_ref: "{{ chart_source_upgrade | default(chart_source) }}" + chart_version: "{{ chart_source_version_upgrade | default(omit) }}" + 
namespace: "{{ helm_namespace }}" + register: install + +- name: Assert idempotency after upgrade + assert: + that: + - install is not changed + - install.status.status | lower == 'deployed' + - install.status.chart == "{{ chart_test }}-{{ chart_test_version_upgrade }}" + +- name: "Remove {{ chart_test }} from {{ source }}" + helm: + binary_path: "{{ helm_binary }}" + state: absent + name: test + namespace: "{{ helm_namespace }}" + register: install + +- name: "Assert that {{ chart_test }} chart is removed from {{ source }}" + assert: + that: + - install is changed + +- name: Check idempotency after remove + helm: + binary_path: "{{ helm_binary }}" + state: absent + name: test + namespace: "{{ helm_namespace }}" + register: install + +- name: Assert idempotency + assert: + that: + - install is not changed + +# Test --replace +- name: Install chart for replace option + helm: + binary_path: "{{ helm_binary }}" + name: test-0001 + chart_ref: "{{ chart_source }}" + chart_version: "{{ chart_source_version | default(omit) }}" + namespace: "{{ helm_namespace }}" + register: install + +- name: "Assert that {{ chart_test }} chart is installed from {{ source }}" + assert: + that: + - install is changed + +- name: Remove {{ chart_test }} with --purge + helm: + binary_path: "{{ helm_binary }}" + state: absent + name: test-0001 + purge: False + namespace: "{{ helm_namespace }}" + register: install + +- name: Check if chart is removed + assert: + that: + - install is changed + +- name: Install chart again with same name test-0001 + helm: + binary_path: "{{ helm_binary }}" + name: test-0001 + chart_ref: "{{ chart_source }}" + chart_version: "{{ chart_source_version | default(omit) }}" + namespace: "{{ helm_namespace }}" + replace: True + register: install + +- name: "Assert that {{ chart_test }} chart is installed from {{ source }}" + assert: + that: + - install is changed + +- name: Remove {{ chart_test }} (cleanup) + helm: + binary_path: "{{ helm_binary }}" + state: absent + 
name: test-0001 + namespace: "{{ helm_namespace }}" + register: install + +- name: Check if chart is removed + assert: + that: + - install is changed + +- name: "Install {{ chart_test }} from {{ source }} with values_files" + helm: + binary_path: "{{ helm_binary }}" + name: test + chart_ref: "{{ chart_source }}" + chart_version: "{{ chart_source_version | default(omit) }}" + namespace: "{{ helm_namespace }}" + values_files: + - "{{ role_path }}/files/values.yaml" + register: install + +- name: "Assert that {{ chart_test }} chart has var from {{ source }}" + assert: + that: + - install is changed + - install.status.status | lower == 'deployed' + - install.status.chart == "{{ chart_test }}-{{ chart_test_version }}" + - "install.status['values'].revisionHistoryLimit == 0" + +- name: Remove helm namespace + k8s: + api_version: v1 + kind: Namespace + name: "{{ helm_namespace }}" + state: absent + wait: true + wait_timeout: 180 diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_chart/from_local_path.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_chart/from_local_path.yml new file mode 100644 index 00000000..58409809 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_chart/from_local_path.yml @@ -0,0 +1,88 @@ +--- +- name: Git clone stable repo + git: + repo: "{{ chart_test_git_repo }}" + dest: /tmp/helm_test_repo + version: 631eb8413f6728962439488f48d7d6fbb954a6db + +- name: Git clone stable repo upgrade + git: + repo: "{{ chart_test_git_repo }}" + dest: /tmp/helm_test_repo_upgrade + version: d37b5025ffc8be49699898369fbb59661e2a8ffb + +- name: Install Chart from local path + include_tasks: "../tests_chart.yml" + vars: + source: local_path + chart_source: "/tmp/helm_test_repo/stable/{{ chart_test }}/" + chart_source_upgrade: "/tmp/helm_test_repo_upgrade/stable/{{ 
chart_test }}/" + +- name: Test appVersion idempotence + vars: + chart_test: "test-chart" + chart_test_version: "0.1.0" + chart_test_version_upgrade: "0.1.0" + chart_test_app_version: "v1" + chart_test_upgrade_app_version: "v2" + block: + - name: Copy test chart + copy: + src: "{{ chart_test }}" + dest: "/tmp/helm_test_appversion/test-chart/" + + # create package with appVersion v1 + - name: "Package chart into archive with appVersion {{ chart_test_app_version }}" + command: "{{ helm_binary }} package --app-version {{ chart_test_app_version }} /tmp/helm_test_appversion/test-chart/{{ chart_test }}" + - name: "Move appVersion {{ chart_test_app_version }} chart archive" + copy: + remote_src: true + src: "test-chart-{{ chart_test_version }}.tgz" + dest: "/tmp/helm_test_appversion/test-chart/{{ chart_test }}-{{ chart_test_app_version }}-{{ chart_test_version }}.tgz" + + # create package with appVersion v2 + - name: "Package chart into archive with appVersion {{ chart_test_upgrade_app_version }}" + command: "{{ helm_binary }} package --app-version {{ chart_test_upgrade_app_version }} /tmp/helm_test_appversion/test-chart/{{ chart_test }}" + - name: "Move appVersion {{ chart_test_upgrade_app_version }} chart archive" + copy: + remote_src: true + src: "test-chart-{{ chart_test_version }}.tgz" + dest: "/tmp/helm_test_appversion/test-chart/{{ chart_test }}-{{ chart_test_upgrade_app_version }}-{{ chart_test_version }}.tgz" + + - name: Install Chart from local path + include_tasks: "../tests_chart.yml" + vars: + source: local_path + chart_source: "/tmp/helm_test_appversion/test-chart/{{ chart_test }}-{{ chart_test_app_version }}-{{ chart_test_version }}.tgz" + chart_source_upgrade: "/tmp/helm_test_appversion/test-chart/{{ chart_test }}-{{ chart_test_upgrade_app_version }}-{{ chart_test_version }}.tgz" + +- name: Test appVersion handling when null + vars: + chart_test: "appversionless-chart" + chart_test_version: "0.1.0" + chart_test_version_upgrade: "0.1.0" + block: + - name: 
Copy test chart + copy: + src: "{{ chart_test }}" + dest: "/tmp/helm_test_appversion/test-null/" + + # create package with appVersion v1 + - name: "Package chart into archive with appVersion v1" + command: "{{ helm_binary }} package --app-version v1 /tmp/helm_test_appversion/test-null/{{ chart_test }}" + + - name: Install Chart from local path + include_tasks: "../tests_chart.yml" + vars: + source: local_path + chart_source: "/tmp/helm_test_appversion/test-null/{{ chart_test }}/" + chart_source_upgrade: "{{ chart_test }}-{{ chart_test_version }}.tgz" + +- name: Remove clone repos + file: + path: "{{ item }}" + state: absent + with_items: + - /tmp/helm_test_repo + - /tmp/helm_test_repo_upgrade + - /tmp/helm_test_appversion diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_chart/from_repository.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_chart/from_repository.yml new file mode 100644 index 00000000..067b216b --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_chart/from_repository.yml @@ -0,0 +1,19 @@ +--- +- name: Add chart repo + helm_repository: + name: test_helm + repo_url: "{{ chart_test_repo }}" + +- name: Install Chart from repository + include_tasks: "../tests_chart.yml" + vars: + source: repository + chart_source: "test_helm/{{ chart_test }}" + chart_source_version: "{{ chart_test_version }}" + chart_source_version_upgrade: "{{ chart_test_version_upgrade }}" + +- name: Add chart repo + helm_repository: + name: test_helm + repo_url: "{{ chart_test_repo }}" + state: absent diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_chart/from_url.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_chart/from_url.yml new file mode 
100644 index 00000000..fd3f66c3 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_chart/from_url.yml @@ -0,0 +1,7 @@ +--- +- name: Install Chart from URL + include_tasks: "../tests_chart.yml" + vars: + source: url + chart_source: "{{ chart_test_repo }}/{{ chart_test }}-{{ chart_test_version }}.tgz" + chart_source_upgrade: "{{ chart_test_repo }}/{{ chart_test }}-{{ chart_test_version_upgrade }}.tgz" diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_helm_plugin.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_helm_plugin.yml new file mode 100644 index 00000000..720a06d5 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_helm_plugin.yml @@ -0,0 +1,84 @@ +--- +- name: Install env plugin in check mode + helm_plugin: + binary_path: "{{ helm_binary }}" + namespace: "{{ helm_namespace }}" + state: present + plugin_path: https://github.com/adamreese/helm-env + register: check_install_env + check_mode: true + +- assert: + that: + - check_install_env.changed + +- name: Install env plugin + helm_plugin: + binary_path: "{{ helm_binary }}" + namespace: "{{ helm_namespace }}" + state: present + plugin_path: https://github.com/adamreese/helm-env + register: install_env + +- assert: + that: + - install_env.changed + +- name: Gather info about all plugin + helm_plugin_info: + binary_path: "{{ helm_binary }}" + namespace: "{{ helm_namespace }}" + register: plugin_info + +- assert: + that: + - plugin_info.plugin_list is defined + +- name: Install env plugin again + helm_plugin: + binary_path: "{{ helm_binary }}" + namespace: "{{ helm_namespace }}" + state: present + plugin_path: https://github.com/adamreese/helm-env + register: install_env + +- assert: + that: + - not install_env.changed + +- name: 
Uninstall env plugin in check mode + helm_plugin: + binary_path: "{{ helm_binary }}" + namespace: "{{ helm_namespace }}" + state: absent + plugin_name: env + register: check_uninstall_env + check_mode: true + +- assert: + that: + - check_uninstall_env.changed + +- name: Uninstall env plugin + helm_plugin: + binary_path: "{{ helm_binary }}" + namespace: "{{ helm_namespace }}" + state: absent + plugin_name: env + register: uninstall_env + +- assert: + that: + - uninstall_env.changed + +- name: Uninstall env plugin again + helm_plugin: + binary_path: "{{ helm_binary }}" + namespace: "{{ helm_namespace }}" + state: absent + plugin_name: env + register: uninstall_env + +- assert: + that: + - not uninstall_env.changed diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_repository.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_repository.yml new file mode 100644 index 00000000..9d274819 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/roles/helm/tasks/tests_repository.yml @@ -0,0 +1,61 @@ +--- +- name: "Ensure test_helm_repo doesn't exist" + helm_repository: + name: test_helm_repo + state: absent + +- name: Add test_helm_repo chart repository + helm_repository: + name: test_helm_repo + repo_url: "{{ chart_test_repo }}" + register: repository + +- name: Assert that test_helm_repo repository is added + assert: + that: + - repository is changed + +- name: Check idempotency + helm_repository: + name: test_helm_repo + repo_url: "{{ chart_test_repo }}" + register: repository + +- name: Assert idempotency + assert: + that: + - repository is not changed + +- name: Failed to add repository with the same name + helm_repository: + name: test_helm_repo + repo_url: "https://other-charts.url" + register: repository_errors + ignore_errors: yes + +- name: Assert that adding repository with the same 
name failed + assert: + that: + - repository_errors is failed + +- name: Remove test_helm_repo chart repository + helm_repository: + name: test_helm_repo + state: absent + register: repository + +- name: Assert that test_helm_repo repository is removed + assert: + that: + - repository is changed + +- name: Check idempotency after remove + helm_repository: + name: test_helm_repo + state: absent + register: repository + +- name: Assert idempotency + assert: + that: + - repository is not changed diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/access_review.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/access_review.yml new file mode 100644 index 00000000..78d6d567 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/access_review.yml @@ -0,0 +1,22 @@ +--- +- name: Create a SelfSubjectAccessReview resource + register: can_i_create_namespaces + ignore_errors: yes + k8s: + state: present + definition: + apiVersion: authorization.k8s.io/v1 + kind: SelfSubjectAccessReview + spec: + resourceAttributes: + group: v1 + resource: Namespace + verb: create + +- name: Assert that the SelfSubjectAccessReview request succeded + assert: + that: + - can_i_create_namespaces is successful + - can_i_create_namespaces.result.status is defined + - can_i_create_namespaces.result.status.allowed is defined + - can_i_create_namespaces.result.status.allowed diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/append_hash.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/append_hash.yml new file mode 100644 index 00000000..9c726a3d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/append_hash.yml @@ -0,0 +1,69 @@ +--- +- block: + - name: Ensure that append_hash namespace exists + 
k8s: + kind: Namespace + name: append-hash + + - name: Create k8s_resource variable + set_fact: + k8s_resource: + metadata: + name: config-map-test + namespace: append-hash + apiVersion: v1 + kind: ConfigMap + data: + hello: world + + - name: Create config map + k8s: + definition: "{{ k8s_resource }}" + append_hash: yes + register: k8s_configmap1 + + - name: Check configmap is created with a hash + assert: + that: + - k8s_configmap1 is changed + - k8s_configmap1.result.metadata.name != 'config-map-test' + - k8s_configmap1.result.metadata.name[:-10] == 'config-map-test-' + + - name: Recreate same config map + k8s: + definition: "{{ k8s_resource }}" + append_hash: yes + register: k8s_configmap2 + + - name: Check configmaps are different + assert: + that: + - k8s_configmap2 is not changed + - k8s_configmap1.result.metadata.name == k8s_configmap2.result.metadata.name + + - name: Add key to config map + k8s: + definition: + metadata: + name: config-map-test + namespace: append-hash + apiVersion: v1 + kind: ConfigMap + data: + hello: world + another: value + append_hash: yes + register: k8s_configmap3 + + - name: Check configmaps are different + assert: + that: + - k8s_configmap3 is changed + - k8s_configmap1.result.metadata.name != k8s_configmap3.result.metadata.name + + always: + - name: Ensure that namespace is removed + k8s: + kind: Namespace + name: append-hash + state: absent diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/apply.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/apply.yml new file mode 100644 index 00000000..2f579755 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/apply.yml @@ -0,0 +1,769 @@ +--- +- block: + - set_fact: + apply_namespace: apply + + - name: Ensure namespace exists + k8s: + definition: + apiVersion: v1 + kind: Namespace + metadata: + name: "{{ apply_namespace }}" + + - name: 
Add a configmap + k8s: + name: "apply-configmap" + namespace: "{{ apply_namespace }}" + definition: + kind: ConfigMap + apiVersion: v1 + data: + one: "1" + two: "2" + three: "3" + apply: yes + register: k8s_configmap + + - name: Check configmap was created + assert: + that: + - k8s_configmap is changed + - k8s_configmap.result.metadata.annotations|default(False) + + - name: Add same configmap again + k8s: + definition: + kind: ConfigMap + apiVersion: v1 + metadata: + name: "apply-configmap" + namespace: "{{ apply_namespace }}" + data: + one: "1" + two: "2" + three: "3" + apply: yes + register: k8s_configmap_2 + + - name: Check nothing changed + assert: + that: + - k8s_configmap_2 is not changed + + - name: Add same configmap again with check mode on + k8s: + definition: + kind: ConfigMap + apiVersion: v1 + metadata: + name: "apply-configmap" + namespace: "{{ apply_namespace }}" + data: + one: "1" + two: "2" + three: "3" + apply: yes + check_mode: yes + register: k8s_configmap_check + + - name: Check nothing changed + assert: + that: + - k8s_configmap_check is not changed + + - name: Add same configmap again but using name and namespace args + k8s: + name: "apply-configmap" + namespace: "{{ apply_namespace }}" + definition: + kind: ConfigMap + apiVersion: v1 + data: + one: "1" + two: "2" + three: "3" + apply: yes + register: k8s_configmap_2a + + - name: Check nothing changed + assert: + that: + - k8s_configmap_2a is not changed + + - name: Update configmap + k8s: + definition: + kind: ConfigMap + apiVersion: v1 + metadata: + name: "apply-configmap" + namespace: "{{ apply_namespace }}" + data: + one: "1" + three: "3" + four: "4" + apply: yes + register: k8s_configmap_3 + + - name: Ensure that configmap has been correctly updated + assert: + that: + - k8s_configmap_3 is changed + - "'four' in k8s_configmap_3.result.data" + - "'two' not in k8s_configmap_3.result.data" + + - name: Add a service + k8s: + definition: + apiVersion: v1 + kind: Service + metadata: + name: 
apply-svc + namespace: "{{ apply_namespace }}" + spec: + selector: + app: whatever + ports: + - name: http + port: 8080 + targetPort: 8080 + apply: yes + register: k8s_service + + - name: Add exactly same service + k8s: + definition: + apiVersion: v1 + kind: Service + metadata: + name: apply-svc + namespace: "{{ apply_namespace }}" + spec: + selector: + app: whatever + ports: + - name: http + port: 8080 + targetPort: 8080 + apply: yes + register: k8s_service_2 + + - name: Check nothing changed + assert: + that: + - k8s_service_2 is not changed + + - name: Add exactly same service in check mode + k8s: + definition: + apiVersion: v1 + kind: Service + metadata: + name: apply-svc + namespace: "{{ apply_namespace }}" + spec: + selector: + app: whatever + ports: + - name: http + port: 8080 + targetPort: 8080 + apply: yes + register: k8s_service_3 + check_mode: yes + + - name: Check nothing changed + assert: + that: + - k8s_service_3 is not changed + + - name: Change service ports + k8s: + definition: + apiVersion: v1 + kind: Service + metadata: + name: apply-svc + namespace: "{{ apply_namespace }}" + spec: + selector: + app: whatever + ports: + - name: http + port: 8081 + targetPort: 8081 + apply: yes + register: k8s_service_4 + + - name: Check ports are correct + assert: + that: + - k8s_service_4 is changed + - k8s_service_4.result.spec.ports | length == 1 + - k8s_service_4.result.spec.ports[0].port == 8081 + + - name: Insert new service port + k8s: + definition: + apiVersion: v1 + kind: Service + metadata: + name: apply-svc + namespace: "{{ apply_namespace }}" + spec: + selector: + app: whatever + ports: + - name: mesh + port: 8080 + targetPort: 8080 + - name: http + port: 8081 + targetPort: 8081 + apply: yes + register: k8s_service_4 + + - name: Check ports are correct + assert: + that: + - k8s_service_4 is changed + - k8s_service_4.result.spec.ports | length == 2 + - k8s_service_4.result.spec.ports[0].port == 8080 + - k8s_service_4.result.spec.ports[1].port == 8081 + 
+ - name: Remove new service port (check mode) + k8s: + definition: + apiVersion: v1 + kind: Service + metadata: + name: apply-svc + namespace: "{{ apply_namespace }}" + spec: + selector: + app: whatever + ports: + - name: http + port: 8081 + targetPort: 8081 + apply: yes + check_mode: yes + register: k8s_service_check + + - name: Check ports are correct + assert: + that: + - k8s_service_check is changed + - k8s_service_check.result.spec.ports | length == 1 + - k8s_service_check.result.spec.ports[0].port == 8081 + + - name: Remove new service port + k8s: + definition: + apiVersion: v1 + kind: Service + metadata: + name: apply-svc + namespace: "{{ apply_namespace }}" + spec: + selector: + app: whatever + ports: + - name: http + port: 8081 + targetPort: 8081 + apply: yes + register: k8s_service_5 + + - name: Check ports are correct + assert: + that: + - k8s_service_5 is changed + - k8s_service_5.result.spec.ports | length == 1 + - k8s_service_5.result.spec.ports[0].port == 8081 + + - name: Add a serviceaccount + k8s: + definition: + apiVersion: v1 + kind: ServiceAccount + metadata: + name: apply-deploy + namespace: "{{ apply_namespace }}" + + - name: Add a deployment + k8s: + definition: + apiVersion: apps/v1 + kind: Deployment + metadata: + name: apply-deploy + namespace: "{{ apply_namespace }}" + spec: + replicas: 1 + selector: + matchLabels: + app: "{{ k8s_pod_name }}" + template: "{{ k8s_pod_template }}" + wait: yes + apply: yes + vars: + k8s_pod_name: apply-deploy + k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green + k8s_pod_service_account: apply-deploy + k8s_pod_ports: + - containerPort: 8080 + name: http + protocol: TCP + k8s_pod_resources: + requests: + cpu: 100m + memory: 100Mi + limits: + cpu: 100m + memory: 100Mi + + - name: Update the earlier deployment in check mode + k8s: + definition: + apiVersion: apps/v1 + kind: Deployment + metadata: + name: apply-deploy + namespace: "{{ apply_namespace }}" + spec: + replicas: 1 + selector: + matchLabels: + 
app: "{{ k8s_pod_name }}" + template: "{{ k8s_pod_template }}" + wait: yes + apply: yes + check_mode: yes + vars: + k8s_pod_name: apply-deploy + k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-purple + k8s_pod_service_account: apply-deploy + k8s_pod_ports: + - containerPort: 8080 + name: http + protocol: TCP + k8s_pod_resources: + requests: + cpu: 50m + limits: + cpu: 50m + memory: 50Mi + register: update_deploy_check_mode + + - name: Ensure check mode change took + assert: + that: + - update_deploy_check_mode is changed + - "update_deploy_check_mode.result.spec.template.spec.containers[0].image == 'gcr.io/kuar-demo/kuard-amd64:v0.10.0-purple'" + + - name: Update the earlier deployment + k8s: + definition: + apiVersion: apps/v1 + kind: Deployment + metadata: + name: apply-deploy + namespace: "{{ apply_namespace }}" + spec: + replicas: 1 + selector: + matchLabels: + app: "{{ k8s_pod_name }}" + template: "{{ k8s_pod_template }}" + wait: yes + apply: yes + vars: + k8s_pod_name: apply-deploy + k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-purple + k8s_pod_service_account: apply-deploy + k8s_pod_ports: + - containerPort: 8080 + name: http + protocol: TCP + k8s_pod_resources: + requests: + cpu: 50m + limits: + cpu: 50m + memory: 50Mi + register: update_deploy_for_real + + - name: Ensure change took + assert: + that: + - update_deploy_for_real is changed + - "update_deploy_for_real.result.spec.template.spec.containers[0].image == 'gcr.io/kuar-demo/kuard-amd64:v0.10.0-purple'" + + - name: Remove the serviceaccount + k8s: + state: absent + definition: + apiVersion: v1 + kind: ServiceAccount + metadata: + name: apply-deploy + namespace: "{{ apply_namespace }}" + + - name: Apply deployment after service account removed + k8s: + definition: + apiVersion: apps/v1 + kind: Deployment + metadata: + name: apply-deploy + namespace: "{{ apply_namespace }}" + spec: + replicas: 1 + selector: + matchLabels: + app: "{{ k8s_pod_name }}" + template: "{{ k8s_pod_template }}" + 
wait: yes + apply: yes + vars: + k8s_pod_name: apply-deploy + k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green + k8s_pod_service_account: apply-deploy + k8s_pod_ports: + - containerPort: 8080 + name: http + protocol: TCP + k8s_pod_resources: + requests: + cpu: 50m + limits: + cpu: 50m + memory: 50Mi + register: deploy_after_serviceaccount_removal + ignore_errors: yes + + - name: Ensure that updating deployment after service account removal failed + assert: + that: + - deploy_after_serviceaccount_removal is failed + + - name: Insert new service port + k8s: + definition: + apiVersion: v1 + kind: Service + metadata: + name: apply-svc + namespace: "{{ apply_namespace }}" + spec: + selector: + app: whatever + ports: + - name: mesh + port: 8080 + targetPort: 8080 + - name: http + port: 8081 + targetPort: 8081 + apply: yes + register: k8s_service_4 + + - name: Check ports are correct + assert: + that: + - k8s_service_4 is changed + - k8s_service_4.result.spec.ports | length == 2 + - k8s_service_4.result.spec.ports[0].port == 8080 + - k8s_service_4.result.spec.ports[1].port == 8081 + + - name: Remove new service port (check mode) + k8s: + definition: + apiVersion: v1 + kind: Service + metadata: + name: apply-svc + namespace: "{{ apply_namespace }}" + spec: + selector: + app: whatever + ports: + - name: http + port: 8081 + targetPort: 8081 + apply: yes + check_mode: yes + register: k8s_service_check + + - name: Check ports are correct + assert: + that: + - k8s_service_check is changed + - k8s_service_check.result.spec.ports | length == 1 + - k8s_service_check.result.spec.ports[0].port == 8081 + + - name: Remove new service port + k8s: + definition: + apiVersion: v1 + kind: Service + metadata: + name: apply-svc + namespace: "{{ apply_namespace }}" + spec: + selector: + app: whatever + ports: + - name: http + port: 8081 + targetPort: 8081 + apply: yes + register: k8s_service_5 + + - name: Check ports are correct + assert: + that: + - k8s_service_5 is changed + - 
k8s_service_5.result.spec.ports | length == 1 + - k8s_service_5.result.spec.ports[0].port == 8081 + + - name: Add a serviceaccount + k8s: + definition: + apiVersion: v1 + kind: ServiceAccount + metadata: + name: apply-deploy + namespace: "{{ apply_namespace }}" + + - name: Add a deployment + k8s: + definition: + apiVersion: apps/v1 + kind: Deployment + metadata: + name: apply-deploy + namespace: "{{ apply_namespace }}" + spec: + replicas: 1 + selector: + matchLabels: + app: "{{ k8s_pod_name }}" + template: "{{ k8s_pod_template }}" + wait: yes + apply: yes + vars: + k8s_pod_name: apply-deploy + k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green + k8s_pod_service_account: apply-deploy + k8s_pod_ports: + - containerPort: 8080 + name: http + protocol: TCP + + - name: Remove the serviceaccount + k8s: + state: absent + definition: + apiVersion: v1 + kind: ServiceAccount + metadata: + name: apply-deploy + namespace: "{{ apply_namespace }}" + + - name: Update the earlier deployment + k8s: + definition: + apiVersion: apps/v1 + kind: Deployment + metadata: + name: apply-deploy + namespace: "{{ apply_namespace }}" + spec: + replicas: 2 + selector: + matchLabels: + app: "{{ k8s_pod_name }}" + template: "{{ k8s_pod_template }}" + wait: yes + apply: yes + vars: + k8s_pod_name: apply-deploy + k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-purple + k8s_pod_service_account: apply-deploy + k8s_pod_ports: + - containerPort: 8080 + name: http + protocol: TCP + register: deploy_after_serviceaccount_removal + ignore_errors: yes + + - name: Ensure that updating deployment after service account removal failed + assert: + that: + - deploy_after_serviceaccount_removal is failed + + - name: Insert new service port + k8s: + definition: + apiVersion: v1 + kind: Service + metadata: + name: apply-svc + namespace: "{{ apply_namespace }}" + spec: + selector: + app: whatever + ports: + - name: mesh + port: 8080 + targetPort: 8080 + - name: http + port: 8081 + targetPort: 8081 + apply: 
yes + register: k8s_service_4 + + - name: Check ports are correct + assert: + that: + - k8s_service_4 is changed + - k8s_service_4.result.spec.ports | length == 2 + - k8s_service_4.result.spec.ports[0].port == 8080 + - k8s_service_4.result.spec.ports[1].port == 8081 + + - name: Remove new service port (check mode) + k8s: + definition: + apiVersion: v1 + kind: Service + metadata: + name: apply-svc + namespace: "{{ apply_namespace }}" + spec: + selector: + app: whatever + ports: + - name: http + port: 8081 + targetPort: 8081 + apply: yes + check_mode: yes + register: k8s_service_check + + - name: Check ports are correct + assert: + that: + - k8s_service_check is changed + - k8s_service_check.result.spec.ports | length == 1 + - k8s_service_check.result.spec.ports[0].port == 8081 + + - name: Remove new service port + k8s: + definition: + apiVersion: v1 + kind: Service + metadata: + name: apply-svc + namespace: "{{ apply_namespace }}" + spec: + selector: + app: whatever + ports: + - name: http + port: 8081 + targetPort: 8081 + apply: yes + register: k8s_service_5 + + - name: Check ports are correct + assert: + that: + - k8s_service_5 is changed + - k8s_service_5.result.spec.ports | length == 1 + - k8s_service_5.result.spec.ports[0].port == 8081 + + - name: Add a serviceaccount + k8s: + definition: + apiVersion: v1 + kind: ServiceAccount + metadata: + name: apply-deploy + namespace: "{{ apply_namespace }}" + + - name: Add a deployment + k8s: + definition: + apiVersion: apps/v1 + kind: Deployment + metadata: + name: apply-deploy + namespace: "{{ apply_namespace }}" + spec: + replicas: 1 + selector: + matchLabels: + app: "{{ k8s_pod_name }}" + template: "{{ k8s_pod_template }}" + wait: yes + apply: yes + vars: + k8s_pod_name: apply-deploy + k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green + k8s_pod_service_account: apply-deploy + k8s_pod_ports: + - containerPort: 8080 + name: http + protocol: TCP + + - name: Remove the serviceaccount + k8s: + state: absent + 
definition: + apiVersion: v1 + kind: ServiceAccount + metadata: + name: apply-deploy + namespace: "{{ apply_namespace }}" + + - name: Update the earlier deployment + k8s: + definition: + apiVersion: apps/v1 + kind: Deployment + metadata: + name: apply-deploy + namespace: "{{ apply_namespace }}" + spec: + replicas: 2 + selector: + matchLabels: + app: "{{ k8s_pod_name }}" + template: "{{ k8s_pod_template }}" + wait: yes + apply: yes + vars: + k8s_pod_name: apply-deploy + k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-purple + k8s_pod_service_account: apply-deploy + k8s_pod_ports: + - containerPort: 8080 + name: http + protocol: TCP + register: deploy_after_serviceaccount_removal + ignore_errors: yes + + - name: Ensure that updating deployment after service account removal failed + assert: + that: + - deploy_after_serviceaccount_removal is failed + + always: + - name: Remove namespace + k8s: + kind: Namespace + name: "{{ apply_namespace }}" + state: absent diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/cluster_info.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/cluster_info.yml new file mode 100644 index 00000000..644de153 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/cluster_info.yml @@ -0,0 +1,22 @@ +--- +- name: Get Information about All APIs + k8s_cluster_info: + register: api_details + +- name: Print all APIs for debugging + debug: + msg: "{{ api_details.apis }}" + +- name: Get core API version + set_fact: + crd: "{{ api_details.apis['apiextensions.k8s.io'] }}" + host: "{{ api_details.connection['host'] }}" + client_version: "{{ api_details.version['client'] }}" + +- name: Check if all APIs are present + assert: + that: + - api_details.apis is defined + - crd is defined + - host is defined + - client_version is defined diff --git 
a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/crd.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/crd.yml new file mode 100644 index 00000000..9b1f5e89 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/crd.yml @@ -0,0 +1,66 @@ +--- +- block: + - name: Create a namespace + k8s: + name: crd + kind: Namespace + + - name: Install custom resource definitions + k8s: + definition: "{{ lookup('file', kubernetes_role_path + '/files/setup-crd.yml') }}" + + - name: Pause 5 seconds to avoid race condition + pause: + seconds: 5 + + - name: Create custom resource definition + k8s: + definition: "{{ lookup('file', kubernetes_role_path + '/files/crd-resource.yml') }}" + namespace: crd + apply: "{{ create_crd_with_apply | default(omit) }}" + register: create_crd + + - name: Patch custom resource definition + k8s: + definition: "{{ lookup('file', kubernetes_role_path + '/files/crd-resource.yml') }}" + namespace: crd + register: recreate_crd + ignore_errors: yes + + - name: Assert that recreating crd is as expected + assert: + that: + - recreate_crd is not failed + + - block: + - name: Recreate custom resource definition with merge_type + k8s: + definition: "{{ lookup('file', kubernetes_role_path + '/files/crd-resource.yml') }}" + merge_type: merge + namespace: crd + register: recreate_crd_with_merge + + - name: Recreate custom resource definition with merge_type list + k8s: + definition: "{{ lookup('file', kubernetes_role_path + '/files/crd-resource.yml') }}" + merge_type: + - strategic-merge + - merge + namespace: crd + register: recreate_crd_with_merge_list + when: recreate_crd is successful + + + - name: Remove crd + k8s: + definition: "{{ lookup('file', kubernetes_role_path + '/files/crd-resource.yml') }}" + namespace: crd + state: absent + + always: + - name: Remove crd namespace + k8s: + kind: Namespace + name: crd + state: 
absent + ignore_errors: yes diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/delete.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/delete.yml new file mode 100644 index 00000000..e49ff221 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/delete.yml @@ -0,0 +1,95 @@ +--- +- block: + - set_fact: + delete_namespace: delete + + - name: Ensure namespace exists + k8s: + definition: + apiVersion: v1 + kind: Namespace + metadata: + name: "{{ delete_namespace }}" + + - name: Add a daemonset + k8s: + definition: + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: delete-daemonset + namespace: "{{ delete_namespace }}" + spec: + selector: + matchLabels: + app: "{{ k8s_pod_name }}" + template: "{{ k8s_pod_template }}" + wait: yes + wait_timeout: 180 + vars: + k8s_pod_name: delete-ds + k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:1 + register: ds + + - name: Check that daemonset wait worked + assert: + that: + - ds.result.status.currentNumberScheduled == ds.result.status.desiredNumberScheduled + + - name: Check if pods exist + k8s_info: + namespace: "{{ delete_namespace }}" + kind: Pod + label_selectors: + - "app={{ k8s_pod_name }}" + vars: + k8s_pod_name: delete-ds + register: pods_create + + - name: Assert that there are pods + assert: + that: + - pods_create.resources + + - name: Remove the daemonset + k8s: + kind: DaemonSet + name: delete-daemonset + namespace: "{{ delete_namespace }}" + state: absent + wait: yes + + - name: Show status of pods + k8s_info: + namespace: "{{ delete_namespace }}" + kind: Pod + label_selectors: + - "app={{ k8s_pod_name }}" + vars: + k8s_pod_name: delete-ds + + - name: Wait for background deletion + pause: + seconds: 30 + + - name: Check if pods still exist + k8s_info: + namespace: "{{ delete_namespace }}" + kind: Pod + label_selectors: + - "app={{ k8s_pod_name }}" + vars: + 
k8s_pod_name: delete-ds + register: pods_delete + + - name: Assert that deleting the daemonset deleted the pods + assert: + that: + - not pods_delete.resources + + always: + - name: Remove namespace + k8s: + kind: Namespace + name: "{{ delete_namespace }}" + state: absent diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/exec.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/exec.yml new file mode 100644 index 00000000..5397ab95 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/exec.yml @@ -0,0 +1,64 @@ +--- +- vars: + exec_namespace: k8s-exec + pod: sleep-pod + exec_pod_definition: + apiVersion: v1 + kind: Pod + metadata: + name: "{{ pod }}" + namespace: "{{ exec_namespace }}" + spec: + containers: + - name: sleeper + image: busybox + command: ["sleep", "infinity"] + + block: + - name: "Ensure that {{ exec_namespace }} namespace exists" + k8s: + kind: Namespace + name: "{{ exec_namespace }}" + + - name: "Create a pod" + k8s: + definition: "{{ exec_pod_definition }}" + wait: yes + wait_sleep: 1 + wait_timeout: 30 + + - name: "Execute a command" + k8s_exec: + pod: "{{ pod }}" + namespace: "{{ exec_namespace }}" + command: cat /etc/resolv.conf + register: output + + - name: "Show k8s_exec output" + debug: + var: output + + - name: "Assert k8s_exec output is correct" + assert: + that: + - "'nameserver' in output.stdout" + + - name: Check if rc is returned for the given command + k8s_exec: + namespace: "{{ exec_namespace }}" + pod: "{{ pod }}" + command: 'false' + register: command_status + ignore_errors: True + + - name: Check last command status + assert: + that: + - command_status.return_code != 0 + + always: + - name: "Cleanup namespace" + k8s: + kind: Namespace + name: "{{ exec_namespace }}" + state: absent diff --git 
a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/full.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/full.yml new file mode 100644 index 00000000..d2666797 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/full.yml @@ -0,0 +1,373 @@ +--- +- block: + - name: Create a namespace + k8s: + name: testing + kind: Namespace + register: output + + - name: Show output + debug: + var: output + + # TODO: See https://github.com/ansible-collections/community.kubernetes/issues/24 + # - name: Setting validate_certs to true causes a failure + # k8s: + # name: testing + # kind: Namespace + # validate_certs: yes + # ignore_errors: yes + # register: output + # + # - name: assert that validate_certs caused a failure (and therefore was correctly translated to verify_ssl) + # assert: + # that: + # - output is failed + + - name: Ensure k8s_info works with empty resources + k8s_info: + kind: Deployment + namespace: testing + api_version: apps/v1 + register: k8s_info + + - name: Assert that k8s_info is in correct format + assert: + that: + - "'resources' in k8s_info" + - not k8s_info.resources + + - name: Create a service + k8s: + state: present + resource_definition: &svc + apiVersion: v1 + kind: Service + metadata: + name: web + namespace: testing + labels: + app: galaxy + service: web + spec: + selector: + app: galaxy + service: web + ports: + - protocol: TCP + targetPort: 8000 + name: port-8000-tcp + port: 8000 + register: output + + - name: Show output + debug: + var: output + + - name: Create the service again + k8s: + state: present + resource_definition: *svc + register: output + + - name: Service creation should be idempotent + assert: + that: not output.changed + + - name: Create a ConfigMap + k8s: + kind: ConfigMap + name: test-force-update + namespace: testing + definition: + data: + key: value + + - name: Force update ConfigMap 
+ k8s: + kind: ConfigMap + name: test-force-update + namespace: testing + definition: + data: + key: newvalue + force: yes + + - name: Create PVC + k8s: + state: present + inline: &pvc + apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: elastic-volume + namespace: testing + spec: + resources: + requests: + storage: 5Gi + accessModes: + - ReadWriteOnce + + - name: Show output + debug: + var: output + + - name: Create the PVC again + k8s: + state: present + inline: *pvc + + - name: Ensure PVC creation is idempotent + assert: + that: not output.changed + + - name: Create deployment + k8s: + state: present + inline: &deployment + apiVersion: apps/v1 + kind: Deployment + metadata: + name: elastic + labels: + app: galaxy + service: elastic + namespace: testing + spec: + replicas: 1 + selector: + matchLabels: + app: galaxy + service: elastic + template: + metadata: + labels: + app: galaxy + service: elastic + spec: + containers: + - name: elastic + volumeMounts: + - mountPath: /usr/share/elasticsearch/data + name: elastic-volume + command: ['elasticsearch'] + image: 'ansible/galaxy-elasticsearch:2.4.6' + volumes: + - name: elastic-volume + persistentVolumeClaim: + claimName: elastic-volume + strategy: + type: RollingUpdate + register: output + + - name: Show output + debug: + var: output + + - name: Create deployment again + k8s: + state: present + inline: *deployment + register: output + + - name: Ensure Deployment creation is idempotent + assert: + that: not output.changed + + ### Type tests + - name: Create a namespace from a string + k8s: + definition: |+ + --- + kind: Namespace + apiVersion: v1 + metadata: + name: testing1 + + ### https://github.com/ansible-collections/community.kubernetes/issues/111 + - set_fact: + api_groups: "{{ lookup('k8s', cluster_info='api_groups') }}" + + - debug: + var: api_groups + + - name: Namespace should exist + k8s_info: + kind: Namespace + api_version: v1 + name: testing1 + register: k8s_info_testing1 + failed_when: not 
k8s_info_testing1.resources or k8s_info_testing1.resources[0].status.phase != "Active" + + - name: Create resources from a multidocument yaml string + k8s: + definition: |+ + --- + kind: Namespace + apiVersion: v1 + metadata: + name: testing2 + --- + kind: Namespace + apiVersion: v1 + metadata: + name: testing3 + + - name: Lookup namespaces + k8s_info: + api_version: v1 + kind: Namespace + name: "{{ item }}" + loop: + - testing2 + - testing3 + register: k8s_namespaces + + - name: Resources should exist + assert: + that: item.resources[0].status.phase == 'Active' + loop: "{{ k8s_namespaces.results }}" + + - name: Delete resources from a multidocument yaml string + k8s: + state: absent + definition: |+ + --- + kind: Namespace + apiVersion: v1 + metadata: + name: testing2 + --- + kind: Namespace + apiVersion: v1 + metadata: + name: testing3 + + - name: Lookup namespaces + k8s_info: + api_version: v1 + kind: Namespace + name: "{{ item }}" + loop: + - testing2 + - testing3 + register: k8s_namespaces + + - name: Resources should not exist + assert: + that: + - not item.resources or item.resources[0].status.phase == "Terminating" + loop: "{{ k8s_namespaces.results }}" + + - name: Create resources from a list + k8s: + definition: + - kind: Namespace + apiVersion: v1 + metadata: + name: testing4 + - kind: Namespace + apiVersion: v1 + metadata: + name: testing5 + + - name: Lookup namespaces + k8s_info: + api_version: v1 + kind: Namespace + name: "{{ item }}" + loop: + - testing4 + - testing5 + register: k8s_namespaces + + - name: Resources should exist + assert: + that: item.resources[0].status.phase == 'Active' + loop: "{{ k8s_namespaces.results }}" + + - name: Delete resources from a list + k8s: + state: absent + definition: + - kind: Namespace + apiVersion: v1 + metadata: + name: testing4 + - kind: Namespace + apiVersion: v1 + metadata: + name: testing5 + + - name: Get info about terminating resources + k8s_info: + api_version: v1 + kind: Namespace + name: "{{ item }}" + 
loop: + - testing4 + - testing5 + register: k8s_info + + - name: Ensure resources are terminating if still in results + assert: + that: not item.resources or item.resources[0].status.phase == "Terminating" + loop: "{{ k8s_info.results }}" + + - name: Create resources from a yaml string ending with --- + k8s: + definition: |+ + --- + kind: Namespace + apiVersion: v1 + metadata: + name: testing6 + --- + + - name: Namespace should exist + k8s_info: + kind: Namespace + api_version: v1 + name: testing6 + register: k8s_info_testing6 + failed_when: not k8s_info_testing6.resources or k8s_info_testing6.resources[0].status.phase != "Active" + + - include_tasks: crd.yml + - include_tasks: lists.yml + - include_tasks: append_hash.yml + + always: + - name: Delete all namespaces + k8s: + state: absent + definition: + - kind: Namespace + apiVersion: v1 + metadata: + name: testing + - kind: Namespace + apiVersion: v1 + metadata: + name: testing1 + - kind: Namespace + apiVersion: v1 + metadata: + name: testing2 + - kind: Namespace + apiVersion: v1 + metadata: + name: testing3 + - kind: Namespace + apiVersion: v1 + metadata: + name: testing4 + - kind: Namespace + apiVersion: v1 + metadata: + name: testing5 + - kind: Namespace + apiVersion: v1 + metadata: + name: testing6 + ignore_errors: yes diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/info.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/info.yml new file mode 100644 index 00000000..2b7fedaf --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/info.yml @@ -0,0 +1,167 @@ +--- +- block: + - set_fact: + wait_namespace: wait + k8s_pod_name: pod-info-1 + multi_pod_one: multi-pod-1 + multi_pod_two: multi-pod-2 + + - name: Ensure namespace exists + k8s: + definition: + apiVersion: v1 + kind: Namespace + metadata: + name: "{{ wait_namespace }}" + + - name: Add a simple pod with 
initContainer + k8s: + definition: + apiVersion: v1 + kind: Pod + metadata: + name: "{{ k8s_pod_name }}" + namespace: "{{ wait_namespace }}" + spec: + initContainers: + - name: init-01 + image: python:3.7-alpine + command: ['sh', '-c', 'sleep 20'] + containers: + - name: utilitypod-01 + image: python:3.7-alpine + command: ['sh', '-c', 'sleep 360'] + + - name: Wait and gather information about new pod + k8s_info: + name: "{{ k8s_pod_name }}" + kind: Pod + namespace: "{{ wait_namespace }}" + wait: yes + wait_sleep: 5 + wait_timeout: 400 + register: wait_info + + - name: Assert that pod creation succeeded + assert: + that: + - wait_info is successful + - not wait_info.changed + - wait_info.resources[0].status.phase == "Running" + + - name: Remove Pod + k8s: + api_version: v1 + kind: Pod + name: "{{ k8s_pod_name }}" + namespace: "{{ wait_namespace }}" + state: absent + wait: yes + ignore_errors: yes + register: short_wait_remove_pod + + - name: Check if pod is removed + assert: + that: + - short_wait_remove_pod is successful + - short_wait_remove_pod.changed + + - name: Create multiple pod with initContainer + k8s: + definition: + apiVersion: v1 + kind: Pod + metadata: + labels: + run: multi-box + name: "{{ multi_pod_one }}" + namespace: "{{ wait_namespace }}" + spec: + initContainers: + - name: init-01 + image: python:3.7-alpine + command: ['sh', '-c', 'sleep 25'] + containers: + - name: multi-pod-01 + image: python:3.7-alpine + command: ['sh', '-c', 'sleep 360'] + + - name: Create another pod with same label as previous pod + k8s: + definition: + apiVersion: v1 + kind: Pod + metadata: + labels: + run: multi-box + name: "{{ multi_pod_two }}" + namespace: "{{ wait_namespace }}" + spec: + initContainers: + - name: init-02 + image: python:3.7-alpine + command: ['sh', '-c', 'sleep 25'] + containers: + - name: multi-pod-02 + image: python:3.7-alpine + command: ['sh', '-c', 'sleep 360'] + + - name: Wait and gather information about new pods + k8s_info: + kind: Pod + 
namespace: "{{ wait_namespace }}" + wait: yes + wait_sleep: 5 + wait_timeout: 400 + label_selectors: + - run == multi-box + register: wait_info + + - name: Assert that pod creation succeeded + assert: + that: + - wait_info is successful + - not wait_info.changed + - wait_info.resources[0].status.phase == "Running" + - wait_info.resources[1].status.phase == "Running" + + - name: "Remove Pod {{ multi_pod_one }}" + k8s: + api_version: v1 + kind: Pod + name: "{{ multi_pod_one }}" + namespace: "{{ wait_namespace }}" + state: absent + wait: yes + ignore_errors: yes + register: multi_pod_one_remove + + - name: "Check if {{ multi_pod_one }} pod is removed" + assert: + that: + - multi_pod_one_remove is successful + - multi_pod_one_remove.changed + + - name: "Remove Pod {{ multi_pod_two }}" + k8s: + api_version: v1 + kind: Pod + name: "{{ multi_pod_two }}" + namespace: "{{ wait_namespace }}" + state: absent + wait: yes + ignore_errors: yes + register: multi_pod_two_remove + + - name: "Check if {{ multi_pod_two }} pod is removed" + assert: + that: + - multi_pod_two_remove is successful + - multi_pod_two_remove.changed + + always: + - name: Remove namespace + k8s: + kind: Namespace + name: "{{ wait_namespace }}" + state: absent diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/lists.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/lists.yml new file mode 100644 index 00000000..9538d011 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/lists.yml @@ -0,0 +1,139 @@ +--- +- name: Ensure testing1 namespace exists + k8s: + api_version: v1 + kind: Namespace + name: testing1 + +- block: + - name: Create configmaps + k8s: + namespace: testing1 + definition: + apiVersion: v1 + kind: ConfigMapList + items: '{{ configmaps }}' + + - name: Get ConfigMaps + k8s_info: + api_version: v1 + kind: ConfigMap + namespace: testing1 + 
label_selectors: + - app=test + register: cms + + - name: All three configmaps should exist + assert: + that: item.data.a is defined + with_items: '{{ cms.resources }}' + + - name: Delete configmaps + k8s: + state: absent + namespace: testing1 + definition: + apiVersion: v1 + kind: ConfigMapList + items: '{{ configmaps }}' + + - name: Get ConfigMaps + k8s_info: + api_version: v1 + kind: ConfigMap + namespace: testing1 + label_selectors: + - app=test + register: cms + + - name: All three configmaps should not exist + assert: + that: not cms.resources + vars: + configmaps: + - metadata: + name: list-example-1 + labels: + app: test + data: + a: first + - metadata: + name: list-example-2 + labels: + app: test + data: + a: second + - metadata: + name: list-example-3 + labels: + app: test + data: + a: third + +- block: + - name: Create list of arbitrary resources + k8s: + namespace: testing1 + definition: + apiVersion: v1 + kind: List + namespace: testing1 + items: '{{ resources }}' + + - name: Get the created resources + k8s_info: + api_version: '{{ item.apiVersion }}' + kind: '{{ item.kind }}' + namespace: testing1 + name: '{{ item.metadata.name }}' + register: list_resources + with_items: '{{ resources }}' + + - name: All resources should exist + assert: + that: ((list_resources.results | sum(attribute="resources", start=[])) | length) == (resources | length) + + - name: Delete list of arbitrary resources + k8s: + state: absent + namespace: testing1 + definition: + apiVersion: v1 + kind: List + namespace: testing1 + items: '{{ resources }}' + + - name: Get the resources + k8s_info: + api_version: '{{ item.apiVersion }}' + kind: '{{ item.kind }}' + namespace: testing1 + name: '{{ item.metadata.name }}' + register: list_resources + with_items: '{{ resources }}' + + - name: The resources should not exist + assert: + that: not ((list_resources.results | sum(attribute="resources", start=[])) | length) + vars: + resources: + - apiVersion: v1 + kind: ConfigMap + metadata: + 
name: list-example-4 + data: + key: value + - apiVersion: v1 + kind: Service + metadata: + name: list-example-svc + labels: + app: test + spec: + selector: + app: test + ports: + - protocol: TCP + targetPort: 8000 + name: port-8000-tcp + port: 8000 diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/log.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/log.yml new file mode 100644 index 00000000..d3da05d3 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/log.yml @@ -0,0 +1,124 @@ +--- +- block: + - name: ensure that k8s-log namespace exists + k8s: + kind: Namespace + name: k8s-log + + - name: create hello-world deployment + k8s: + wait: yes + definition: + apiVersion: apps/v1 + kind: Deployment + metadata: + name: hello-world + namespace: k8s-log + spec: + selector: + matchLabels: + app: hello-world + template: + metadata: + labels: + app: hello-world + spec: + containers: + - image: busybox + name: hello-world + command: ['sh'] + args: ['-c', 'while true ; do echo "hello world" && sleep 10 ; done'] + restartPolicy: Always + + - name: retrieve the log by providing the deployment + k8s_log: + api_version: apps/v1 + kind: Deployment + namespace: k8s-log + name: hello-world + register: deployment_log + + - name: verify that the log can be retrieved via the deployment + assert: + that: + - "'hello world' in deployment_log.log" + - item == 'hello world' or item == '' + with_items: '{{ deployment_log.log_lines }}' + + - name: retrieve the log with a label selector + k8s_log: + namespace: k8s-log + label_selectors: + - 'app=hello-world' + register: label_selector_log + + - name: verify that the log can be retrieved via the label + assert: + that: + - "'hello world' in label_selector_log.log" + - item == 'hello world' or item == '' + with_items: '{{ label_selector_log.log_lines }}' + + - name: get the hello-world pod + 
k8s_info: + kind: Pod + namespace: k8s-log + label_selectors: + - 'app=hello-world' + register: k8s_log_pods + + - name: retrieve the log directly with the pod name + k8s_log: + namespace: k8s-log + name: '{{ k8s_log_pods.resources.0.metadata.name }}' + register: pod_log + + - name: verify that the log can be retrieved via the pod name + assert: + that: + - "'hello world' in pod_log.log" + - item == 'hello world' or item == '' + with_items: '{{ pod_log.log_lines }}' + + - name: Create a job that calculates 7 + k8s: + state: present + wait: yes + wait_timeout: 120 + wait_condition: + type: Complete + status: 'True' + definition: + apiVersion: batch/v1 + kind: Job + metadata: + name: int-log + namespace: k8s-log + spec: + template: + spec: + containers: + - name: busybox + image: busybox + command: ["echo", "7"] + restartPolicy: Never + backoffLimit: 4 + + - name: retrieve logs from the job + k8s_log: + api_version: batch/v1 + kind: Job + namespace: k8s-log + name: int-log + register: job_logs + + - name: verify the log was successfully retrieved + assert: + that: job_logs.log_lines[0] == "7" + + always: + - name: ensure that namespace is removed + k8s: + kind: Namespace + name: k8s-log + state: absent diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/rollback.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/rollback.yml new file mode 100644 index 00000000..743ff53c --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/rollback.yml @@ -0,0 +1,217 @@ +--- +- block: + - name: Set variables + set_fact: + namespace: "testingrollback" + + - name: Create a namespace + k8s: + name: "{{ namespace }}" + kind: Namespace + api_version: v1 + apply: no + register: output + + - name: show output + debug: + var: output + + - name: Create a deployment + k8s: + state: present + wait: yes + inline: &deploy + apiVersion: apps/v1 + 
kind: Deployment + metadata: + name: nginx-deploy + labels: + app: nginx + namespace: "{{ namespace }}" + spec: + replicas: 3 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.17 + ports: + - containerPort: 80 + register: output + + - name: Show output + debug: + var: output + + - name: Crash the existing deployment + k8s: + state: present + wait: yes + definition: + apiVersion: apps/v1 + kind: Deployment + metadata: + name: nginx-deploy + labels: + app: nginx + namespace: "{{ namespace }}" + spec: + replicas: 3 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.0.23449928384992872784 + ports: + - containerPort: 80 + ignore_errors: yes + register: output + + - name: Rolling Back the crashed deployment + k8s_rollback: + api_version: apps/v1 + kind: Deployment + name: nginx-deploy + namespace: "{{ namespace }}" + when: output.failed + register: output + + - name: Show output + debug: + var: output + + - name: Create a DaemonSet + k8s: + state: present + wait: yes + definition: + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: fluentd-elasticsearch + namespace: "{{ namespace }}" + labels: + k8s-app: fluentd-logging + spec: + selector: + matchLabels: + name: fluentd-elasticsearch + template: + metadata: + labels: + name: fluentd-elasticsearch + spec: + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + containers: + - name: fluentd-elasticsearch + image: quay.io/fluentd_elasticsearch/fluentd:v2.5.2 + resources: + limits: + memory: 200Mi + requests: + cpu: 100m + memory: 200Mi + volumeMounts: + - name: varlog + mountPath: /var/log + - name: varlibdockercontainers + mountPath: /var/lib/docker/containers + readOnly: true + terminationGracePeriodSeconds: 30 + volumes: + - name: varlog + hostPath: + path: /var/log + - name: varlibdockercontainers + hostPath: + 
path: /var/lib/docker/containers + register: output + + - name: Show output + debug: + var: output + + - name: Crash the existing DaemonSet + k8s: + state: present + wait: yes + definition: + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: fluentd-elasticsearch + namespace: "{{ namespace }}" + labels: + k8s-app: fluentd-logging + spec: + selector: + matchLabels: + name: fluentd-elasticsearch + template: + metadata: + labels: + name: fluentd-elasticsearch + spec: + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + containers: + - name: fluentd-elasticsearch + image: quay.io/fluentd_elasticsearch/fluentd:v2734894949 + resources: + limits: + memory: 200Mi + requests: + cpu: 100m + memory: 200Mi + volumeMounts: + - name: varlog + mountPath: /var/log + - name: varlibdockercontainers + mountPath: /var/lib/docker/containers + readOnly: true + terminationGracePeriodSeconds: 30 + volumes: + - name: varlog + hostPath: + path: /var/log + - name: varlibdockercontainers + hostPath: + path: /var/lib/docker/containers + ignore_errors: yes + register: output + + - name: Rolling Back the crashed DaemonSet + k8s_rollback: + api_version: apps/v1 + kind: DaemonSet + name: fluentd-elasticsearch + namespace: "{{ namespace }}" + when: output.failed + register: output + + - name: Show output + debug: + var: output + + always: + - name: Delete {{ namespace }} namespace + k8s: + name: "{{ namespace }}" + kind: Namespace + api_version: v1 + state: absent diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/scale.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/scale.yml new file mode 100644 index 00000000..32b718df --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/scale.yml @@ -0,0 +1,210 @@ +--- +- block: + - set_fact: + scale_namespace: scale + + - name: Ensure namespace exists + k8s: + definition: + 
apiVersion: v1 + kind: Namespace + metadata: + name: "{{ scale_namespace }}" + + - name: Add a deployment + k8s: + definition: + apiVersion: apps/v1 + kind: Deployment + metadata: + name: scale-deploy + namespace: "{{ scale_namespace }}" + spec: + replicas: 1 + selector: + matchLabels: + app: "{{ k8s_pod_name }}" + template: "{{ k8s_pod_template }}" + wait: yes + wait_timeout: 60 + apply: yes + vars: + k8s_pod_name: scale-deploy + k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green + k8s_pod_ports: + - containerPort: 8080 + name: http + protocol: TCP + + - name: Get pods in scale-deploy + k8s_info: + kind: Pod + label_selectors: + - app=scale-deploy + namespace: "{{ scale_namespace }}" + field_selectors: + - status.phase=Running + + - name: Scale the deployment + k8s_scale: + api_version: apps/v1 + kind: Deployment + name: scale-deploy + namespace: "{{ scale_namespace }}" + replicas: 0 + wait: yes + register: scale_down + + - name: Get pods in scale-deploy + k8s_info: + kind: Pod + label_selectors: + - app=scale-deploy + namespace: "{{ scale_namespace }}" + field_selectors: + - status.phase=Running + register: scale_down_deploy_pods + until: "{{ scale_down_deploy_pods.resources | length == 0 }}" + retries: 6 + delay: 5 + + - name: Ensure that scale down took effect + assert: + that: + - scale_down is changed + - '"duration" in scale_down' + - scale_down.diff + + - name: Reapply the earlier deployment + k8s: + definition: + api_version: apps/v1 + kind: Deployment + metadata: + name: scale-deploy + namespace: "{{ scale_namespace }}" + spec: + replicas: 1 + selector: + matchLabels: + app: "{{ k8s_pod_name }}" + template: "{{ k8s_pod_template }}" + wait: yes + wait_timeout: 60 + apply: yes + vars: + k8s_pod_name: scale-deploy + k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green + k8s_pod_ports: + - containerPort: 8080 + name: http + protocol: TCP + register: reapply_after_scale + + - name: Get pods in scale-deploy + k8s_info: + kind: Pod + label_selectors: 
+ - app=scale-deploy + namespace: "{{ scale_namespace }}" + field_selectors: + - status.phase=Running + register: scale_up_deploy_pods + + - name: Ensure that reapply after scale worked + assert: + that: + - reapply_after_scale is changed + - scale_up_deploy_pods.resources | length == 1 + + - name: Scale the deployment up + k8s_scale: + api_version: apps/v1 + kind: Deployment + name: scale-deploy + namespace: "{{ scale_namespace }}" + replicas: 2 + wait: yes + wait_timeout: 60 + register: scale_up + + - name: Get pods in scale-deploy + k8s_info: + kind: Pod + label_selectors: + - app=scale-deploy + field_selectors: + - status.phase=Running + namespace: "{{ scale_namespace }}" + register: scale_up_further_deploy_pods + + - name: Ensure that scale up worked + assert: + that: + - scale_up is changed + - '"duration" in scale_up' + - scale_up.diff + - scale_up_further_deploy_pods.resources | length == 2 + + - name: Don't scale the deployment up + k8s_scale: + api_version: apps/v1 + kind: Deployment + name: scale-deploy + namespace: "{{ scale_namespace }}" + replicas: 2 + wait: yes + register: scale_up_noop + + - name: Get pods in scale-deploy + k8s_info: + kind: Pod + label_selectors: + - app=scale-deploy + field_selectors: + - status.phase=Running + namespace: "{{ scale_namespace }}" + register: scale_up_noop_pods + + - name: Ensure that no-op scale up worked + assert: + that: + - scale_up_noop is not changed + - not scale_up_noop.diff + - scale_up_noop_pods.resources | length == 2 + - '"duration" in scale_up_noop' + + - name: Scale deployment down without wait + k8s_scale: + api_version: apps/v1 + kind: Deployment + name: scale-deploy + namespace: "{{ scale_namespace }}" + replicas: 1 + wait: no + register: scale_down_no_wait + + - name: Ensure that scale down succeeds + k8s_info: + kind: Pod + label_selectors: + - app=scale-deploy + namespace: "{{ scale_namespace }}" + register: scale_down_no_wait_pods + retries: 6 + delay: 5 + until: "{{ 
scale_down_no_wait_pods.resources | length == 1 }}" + + - name: Ensure that scale down without wait worked + assert: + that: + - scale_down_no_wait is changed + - scale_down_no_wait.diff + - scale_down_no_wait_pods.resources | length == 1 + + always: + - name: Remove namespace + k8s: + kind: Namespace + name: "{{ scale_namespace }}" + state: absent diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/template.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/template.yml new file mode 100644 index 00000000..4d76d799 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/template.yml @@ -0,0 +1,167 @@ +--- +- block: + - set_fact: + template_namespace: template-test + + - name: Ensure namespace exists + k8s: + definition: + apiVersion: v1 + kind: Namespace + metadata: + name: "{{ template_namespace }}" + + - name: Check if k8s_service does not inherit parameter + community.kubernetes.k8s_service: + template: "pod_template_one.j2" + state: present + ignore_errors: yes + register: r + + - name: Check for expected failures in last tasks + assert: + that: + - r.failed + - "'is only supported parameter for' in r.msg" + + - name: Specify both definition and template + community.kubernetes.k8s: + state: present + template: "pod_template_one.j2" + definition: + apiVersion: apps/v1 + kind: Deployment + metadata: + name: apply-deploy + namespace: "{{ template_namespace }}" + spec: + replicas: 1 + selector: + matchLabels: + app: "{{ k8s_pod_name }}" + vars: + k8s_pod_name: pod + k8s_pod_namespace: "{{ template_namespace }}" + register: r + ignore_errors: yes + + - name: Check if definition and template are mutually exclusive + assert: + that: + - r.failed + - "'parameters are mutually exclusive' in r.msg" + + - name: Specify both src and template + community.kubernetes.k8s: + state: present + src: 
"../templates/pod_template_one.j2" + template: "pod_template_one.j2" + vars: + k8s_pod_name: pod + k8s_pod_namespace: "{{ template_namespace }}" + register: r + ignore_errors: yes + + - name: Check if src and template are mutually exclusive + assert: + that: + - r.failed + - "'parameters are mutually exclusive' in r.msg" + + - name: Create pod using template (direct specification) + community.kubernetes.k8s: + template: "pod_template_one.j2" + wait: yes + vars: + k8s_pod_name: pod-1 + k8s_pod_namespace: "{{ template_namespace }}" + register: r + + - name: Assert that pod creation succeeded using template + assert: + that: + - r is successful + + - name: Create pod using template with wrong parameter + community.kubernetes.k8s: + template: + - default + wait: yes + vars: + k8s_pod_name: pod-2 + k8s_pod_namespace: "{{ template_namespace }}" + register: r + ignore_errors: True + + - name: Assert that pod creation failed using template due to wrong parameter + assert: + that: + - r is failed + - "'Error while reading template file' in r.msg" + + - name: Create pod using template (path parameter) + community.kubernetes.k8s: + template: + path: "pod_template_one.j2" + wait: yes + vars: + k8s_pod_name: pod-3 + k8s_pod_namespace: "{{ template_namespace }}" + register: r + + - name: Assert that pod creation succeeded using template + assert: + that: + - r is successful + + - name: Create pod using template (different variable string) + community.kubernetes.k8s: + template: + path: "pod_template_two.j2" + variable_start_string: '[[' + variable_end_string: ']]' + wait: yes + vars: + k8s_pod_name: pod-4 + k8s_pod_namespace: "[[ template_namespace ]]" + ansible_python_interpreter: "[[ ansible_playbook_python ]]" + register: r + + - name: Assert that pod creation succeeded using template + assert: + that: + - r is successful + + - name: Create pods using multi-resource template + community.kubernetes.k8s: + template: + path: "pod_template_three.j2" + wait: yes + vars: + 
k8s_pod_name_one: pod-5 + k8s_pod_name_two: pod-6 + k8s_pod_namespace: "{{ template_namespace }}" + register: r + + - name: Assert that pod creation succeeded using template + assert: + that: + - r is successful + + - name: Remove Pod (Cleanup) + k8s: + api_version: v1 + kind: Pod + name: "pod-{{ item }}" + namespace: "{{ template_namespace }}" + state: absent + wait: yes + ignore_errors: yes + loop: "{{ range(1, 7) | list }}" + + always: + - name: Remove namespace (Cleanup) + k8s: + kind: Namespace + name: "{{ template_namespace }}" + state: absent diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/waiter.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/waiter.yml new file mode 100644 index 00000000..4049f6ef --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/tasks/waiter.yml @@ -0,0 +1,363 @@ +--- +- block: + - set_fact: + wait_namespace: wait + + - name: Ensure namespace exists + k8s: + definition: + apiVersion: v1 + kind: Namespace + metadata: + name: "{{ wait_namespace }}" + + - name: Add a simple pod + k8s: + definition: + apiVersion: v1 + kind: Pod + metadata: + name: "{{ k8s_pod_name }}" + namespace: "{{ wait_namespace }}" + spec: "{{ k8s_pod_spec }}" + wait: yes + vars: + k8s_pod_name: wait-pod + k8s_pod_image: alpine:3.8 + k8s_pod_command: + - sleep + - "10000" + register: wait_pod + ignore_errors: yes + + - name: Assert that pod creation succeeded + assert: + that: + - wait_pod is successful + + - name: Add a daemonset + k8s: + definition: + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: wait-daemonset + namespace: "{{ wait_namespace }}" + spec: + selector: + matchLabels: + app: "{{ k8s_pod_name }}" + template: "{{ k8s_pod_template }}" + wait: yes + wait_sleep: 5 + wait_timeout: 180 + vars: + k8s_pod_name: wait-ds + k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:1 + register: ds + + - name: Check 
that daemonset wait worked + assert: + that: + - ds.result.status.currentNumberScheduled == ds.result.status.desiredNumberScheduled + + - name: Update a daemonset in check_mode + k8s: + definition: + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: wait-daemonset + namespace: "{{ wait_namespace }}" + spec: + selector: + matchLabels: + app: "{{ k8s_pod_name }}" + updateStrategy: + type: RollingUpdate + template: "{{ k8s_pod_template }}" + wait: yes + wait_sleep: 3 + wait_timeout: 180 + vars: + k8s_pod_name: wait-ds + k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:2 + register: update_ds_check_mode + check_mode: yes + + - name: Check that check_mode result contains the changes + assert: + that: + - update_ds_check_mode is changed + - "update_ds_check_mode.result.spec.template.spec.containers[0].image == 'gcr.io/kuar-demo/kuard-amd64:2'" + + - name: Update a daemonset + k8s: + definition: + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: wait-daemonset + namespace: "{{ wait_namespace }}" + spec: + selector: + matchLabels: + app: "{{ k8s_pod_name }}" + updateStrategy: + type: RollingUpdate + template: "{{ k8s_pod_template }}" + wait: yes + wait_sleep: 3 + wait_timeout: 180 + vars: + k8s_pod_name: wait-ds + k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:3 + register: ds + + - name: Get updated pods + k8s_info: + api_version: v1 + kind: Pod + namespace: "{{ wait_namespace }}" + label_selectors: + - app=wait-ds + field_selectors: + - status.phase=Running + register: updated_ds_pods + + - name: Check that daemonset wait worked + assert: + that: + - ds.result.status.currentNumberScheduled == ds.result.status.desiredNumberScheduled + - updated_ds_pods.resources[0].spec.containers[0].image.endswith(":3") + + - name: Add a crashing pod + k8s: + definition: + apiVersion: v1 + kind: Pod + metadata: + name: "{{ k8s_pod_name }}" + namespace: "{{ wait_namespace }}" + spec: "{{ k8s_pod_spec }}" + wait: yes + wait_sleep: 1 + wait_timeout: 30 + vars: + k8s_pod_name: 
wait-crash-pod + k8s_pod_image: alpine:3.8 + k8s_pod_command: + - /bin/false + register: crash_pod + ignore_errors: yes + + - name: Check that task failed + assert: + that: + - crash_pod is failed + + - name: Use a non-existent image + k8s: + definition: + apiVersion: v1 + kind: Pod + metadata: + name: "{{ k8s_pod_name }}" + namespace: "{{ wait_namespace }}" + spec: "{{ k8s_pod_spec }}" + wait: yes + wait_sleep: 1 + wait_timeout: 30 + vars: + k8s_pod_name: wait-no-image-pod + k8s_pod_image: i_made_this_up:and_this_too + register: no_image_pod + ignore_errors: yes + + - name: Check that task failed + assert: + that: + - no_image_pod is failed + + - name: Add a deployment + k8s: + definition: + apiVersion: apps/v1 + kind: Deployment + metadata: + name: wait-deploy + namespace: "{{ wait_namespace }}" + spec: + replicas: 3 + selector: + matchLabels: + app: "{{ k8s_pod_name }}" + template: "{{ k8s_pod_template }}" + wait: yes + vars: + k8s_pod_name: wait-deploy + k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:1 + k8s_pod_ports: + - containerPort: 8080 + name: http + protocol: TCP + + register: deploy + + - name: Check that deployment wait worked + assert: + that: + - deploy.result.status.availableReplicas == deploy.result.status.replicas + + - name: Update a deployment + k8s: + definition: + apiVersion: apps/v1 + kind: Deployment + metadata: + name: wait-deploy + namespace: "{{ wait_namespace }}" + spec: + replicas: 3 + selector: + matchLabels: + app: "{{ k8s_pod_name }}" + template: "{{ k8s_pod_template }}" + wait: yes + vars: + k8s_pod_name: wait-deploy + k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:2 + k8s_pod_ports: + - containerPort: 8080 + name: http + protocol: TCP + register: update_deploy + + # It looks like the Deployment is updated to have the desired state *before* the pods are terminated + # Wait a couple of seconds to allow the old pods to at least get to Terminating state + - name: Avoid race condition + pause: + seconds: 2 + + - name: Get updated pods + 
k8s_info: + api_version: v1 + kind: Pod + namespace: "{{ wait_namespace }}" + label_selectors: + - app=wait-deploy + field_selectors: + - status.phase=Running + register: updated_deploy_pods + until: "{{ updated_deploy_pods.resources[0].spec.containers[0].image.endswith(':2') }}" + retries: 6 + delay: 5 + + - name: Check that deployment wait worked + assert: + that: + - deploy.result.status.availableReplicas == deploy.result.status.replicas + + - name: Pause a deployment + k8s: + definition: + apiVersion: apps/v1 + kind: Deployment + metadata: + name: wait-deploy + namespace: "{{ wait_namespace }}" + spec: + paused: True + apply: no + wait: yes + wait_condition: + type: Progressing + status: Unknown + reason: DeploymentPaused + register: pause_deploy + + - name: Check that paused deployment wait worked + assert: + that: + - condition.reason == "DeploymentPaused" + - condition.status == "Unknown" + vars: + condition: '{{ pause_deploy.result.status.conditions[1] }}' + + - name: Add a service based on the deployment + k8s: + definition: + apiVersion: v1 + kind: Service + metadata: + name: wait-svc + namespace: "{{ wait_namespace }}" + spec: + selector: + app: "{{ k8s_pod_name }}" + ports: + - port: 8080 + targetPort: 8080 + protocol: TCP + wait: yes + vars: + k8s_pod_name: wait-deploy + register: service + + - name: Assert that waiting for service works + assert: + that: + - service is successful + + - name: Add a crashing deployment + k8s: + definition: + apiVersion: apps/v1 + kind: Deployment + metadata: + name: wait-crash-deploy + namespace: "{{ wait_namespace }}" + spec: + replicas: 3 + selector: + matchLabels: + app: "{{ k8s_pod_name }}" + template: "{{ k8s_pod_template }}" + wait: yes + vars: + k8s_pod_name: wait-crash-deploy + k8s_pod_image: alpine:3.8 + k8s_pod_command: + - /bin/false + register: wait_crash_deploy + ignore_errors: yes + + - name: Check that task failed + assert: + that: + - wait_crash_deploy is failed + + - name: Remove Pod with very short 
timeout + k8s: + api_version: v1 + kind: Pod + name: wait-pod + namespace: "{{ wait_namespace }}" + state: absent + wait: yes + wait_sleep: 2 + wait_timeout: 5 + ignore_errors: yes + register: short_wait_remove_pod + + - name: Check that task failed + assert: + that: + - short_wait_remove_pod is failed + + always: + - name: Remove namespace + k8s: + kind: Namespace + name: "{{ wait_namespace }}" + state: absent diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/templates/pod_template_one.j2 b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/templates/pod_template_one.j2 new file mode 100644 index 00000000..bafb7d9f --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/templates/pod_template_one.j2 @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + app: "{{ k8s_pod_name }}" + name: '{{ k8s_pod_name }}' + namespace: '{{ k8s_pod_namespace }}' +spec: + containers: + - args: + - /bin/sh + - -c + - while true; do echo $(date); sleep 10; done + image: python:3.7-alpine + imagePullPolicy: Always + name: '{{ k8s_pod_name }}' diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/templates/pod_template_three.j2 b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/templates/pod_template_three.j2 new file mode 100644 index 00000000..06e4686e --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/templates/pod_template_three.j2 @@ -0,0 +1,35 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + app: "{{ k8s_pod_name_one }}" + name: '{{ k8s_pod_name_one }}' + namespace: '{{ k8s_pod_namespace }}' +spec: + containers: + - args: + - /bin/sh + - -c + - while true; do echo $(date); sleep 10; done + image: python:3.7-alpine + imagePullPolicy: Always + name: '{{ k8s_pod_name_one }}' + +--- +apiVersion: v1 +kind: Pod 
+metadata: + labels: + app: "{{ k8s_pod_name_two }}" + name: '{{ k8s_pod_name_two }}' + namespace: '{{ k8s_pod_namespace }}' +spec: + containers: + - args: + - /bin/sh + - -c + - while true; do echo $(date); sleep 10; done + image: python:3.7-alpine + imagePullPolicy: Always + name: '{{ k8s_pod_name_two }}' diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/templates/pod_template_two.j2 b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/templates/pod_template_two.j2 new file mode 100644 index 00000000..cef89bf1 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/templates/pod_template_two.j2 @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + app: '[[ k8s_pod_name ]]' + name: '[[ k8s_pod_name ]]' + namespace: '[[ k8s_pod_namespace ]]' +spec: + containers: + - args: + - /bin/sh + - -c + - while true; do echo $(date); sleep 10; done + image: python:3.7-alpine + imagePullPolicy: Always + name: '[[ k8s_pod_name ]]' diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/vars/main.yml b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/vars/main.yml new file mode 100644 index 00000000..a478de97 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/molecule/default/vars/main.yml @@ -0,0 +1,40 @@ +--- +k8s_pod_metadata: + labels: + app: "{{ k8s_pod_name }}" + +k8s_pod_spec: + serviceAccount: "{{ k8s_pod_service_account }}" + containers: + - image: "{{ k8s_pod_image }}" + imagePullPolicy: Always + name: "{{ k8s_pod_name }}" + command: "{{ k8s_pod_command }}" + readinessProbe: + initialDelaySeconds: 15 + exec: + command: + - /bin/true + resources: "{{ k8s_pod_resources }}" + ports: "{{ k8s_pod_ports }}" + env: "{{ k8s_pod_env }}" + + +k8s_pod_service_account: default + +k8s_pod_resources: + limits: + cpu: "100m" + memory: 
"100Mi" + +k8s_pod_command: [] + +k8s_pod_ports: [] + +k8s_pod_env: [] + +k8s_pod_template: + metadata: "{{ k8s_pod_metadata }}" + spec: "{{ k8s_pod_spec }}" + +kubernetes_role_path: ../../tests/integration/targets/kubernetes diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/helm.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/helm.py new file mode 100644 index 00000000..6b26225f --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/helm.py @@ -0,0 +1,198 @@ +# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> +# Copyright (c) 2017, Toshio Kuratomi <tkuraotmi@ansible.com> +# Copyright (c) 2020, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import copy +import traceback + +from ansible.config.manager import ensure_type +from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleAction, AnsibleActionFail +from ansible.module_utils.parsing.convert_bool import boolean +from ansible.module_utils.six import string_types +from ansible.module_utils._text import to_text, to_bytes, to_native +from ansible.plugins.action import ActionBase + + +class ActionModule(ActionBase): + + TRANSFERS_FILES = True + DEFAULT_NEWLINE_SEQUENCE = "\n" + + def _ensure_invocation(self, result): + # NOTE: adding invocation arguments here needs to be kept in sync with + # any no_log specified in the argument_spec in the module. 
+ if 'invocation' not in result: + if self._play_context.no_log: + result['invocation'] = "CENSORED: no_log is set" + else: + result['invocation'] = self._task.args.copy() + result['invocation']['module_args'] = self._task.args.copy() + + return result + + def run(self, tmp=None, task_vars=None): + ''' handler for k8s options ''' + if task_vars is None: + task_vars = dict() + + result = super(ActionModule, self).run(tmp, task_vars) + del tmp # tmp no longer has any effect + + new_module_args = copy.deepcopy(self._task.args) + kubeconfig = self._task.args.get('kubeconfig', None) + # find the file in the expected search path + if kubeconfig: + try: + # find in expected paths + kubeconfig = self._find_needle('files', kubeconfig) + except AnsibleError as e: + result['failed'] = True + result['msg'] = to_text(e) + result['exception'] = traceback.format_exc() + return result + + if kubeconfig: + # decrypt kubeconfig found + actual_file = self._loader.get_real_file(kubeconfig, decrypt=True) + new_module_args['kubeconfig'] = actual_file + + # find the file in the expected search path + src = self._task.args.get('src', None) + if src: + try: + # find in expected paths + src = self._find_needle('files', src) + except AnsibleError as e: + result['failed'] = True + result['msg'] = to_text(e) + result['exception'] = traceback.format_exc() + return result + + if src: + new_module_args['src'] = src + + template = self._task.args.get('template', None) + if template: + # template is only supported by k8s module. 
+ if self._task.action not in ('k8s', 'community.kubernetes.k8s', 'community.okd.k8s'): + raise AnsibleActionFail("'template' is only supported parameter for 'k8s' module.") + if isinstance(template, string_types): + # treat this as raw_params + template_path = template + newline_sequence = self.DEFAULT_NEWLINE_SEQUENCE + variable_start_string = None + variable_end_string = None + block_start_string = None + block_end_string = None + trim_blocks = True + lstrip_blocks = False + elif isinstance(template, dict): + template_args = template + template_path = template_args.get('path', None) + if not template: + raise AnsibleActionFail("Please specify path for template.") + + # Options type validation strings + for s_type in ('newline_sequence', 'variable_start_string', 'variable_end_string', 'block_start_string', + 'block_end_string'): + if s_type in template_args: + value = ensure_type(template_args[s_type], 'string') + if value is not None and not isinstance(value, string_types): + raise AnsibleActionFail("%s is expected to be a string, but got %s instead" % (s_type, type(value))) + try: + trim_blocks = boolean(template_args.get('trim_blocks', True), strict=False) + lstrip_blocks = boolean(template_args.get('lstrip_blocks', False), strict=False) + except TypeError as e: + raise AnsibleActionFail(to_native(e)) + + newline_sequence = template_args.get('newline_sequence', self.DEFAULT_NEWLINE_SEQUENCE) + variable_start_string = template_args.get('variable_start_string', None) + variable_end_string = template_args.get('variable_end_string', None) + block_start_string = template_args.get('block_start_string', None) + block_end_string = template_args.get('block_end_string', None) + else: + raise AnsibleActionFail("Error while reading template file - " + "a string or dict for template expected, but got %s instead" % type(template)) + try: + source = self._find_needle('templates', template_path) + except AnsibleError as e: + raise AnsibleActionFail(to_text(e)) + + # Option 
`lstrip_blocks' was added in Jinja2 version 2.7. + if lstrip_blocks: + try: + import jinja2.defaults + except ImportError: + raise AnsibleError('Unable to import Jinja2 defaults for determining Jinja2 features.') + + try: + jinja2.defaults.LSTRIP_BLOCKS + except AttributeError: + raise AnsibleError("Option `lstrip_blocks' is only available in Jinja2 versions >=2.7") + + wrong_sequences = ["\\n", "\\r", "\\r\\n"] + allowed_sequences = ["\n", "\r", "\r\n"] + + # We need to convert unescaped sequences to proper escaped sequences for Jinja2 + if newline_sequence in wrong_sequences: + newline_sequence = allowed_sequences[wrong_sequences.index(newline_sequence)] + elif newline_sequence not in allowed_sequences: + raise AnsibleActionFail("newline_sequence needs to be one of: \n, \r or \r\n") + + # Get vault decrypted tmp file + try: + tmp_source = self._loader.get_real_file(source) + except AnsibleFileNotFound as e: + raise AnsibleActionFail("could not find template=%s, %s" % (source, to_text(e))) + b_tmp_source = to_bytes(tmp_source, errors='surrogate_or_strict') + + # template the source data locally & get ready to transfer + try: + with open(b_tmp_source, 'rb') as f: + try: + template_data = to_text(f.read(), errors='surrogate_or_strict') + except UnicodeError: + raise AnsibleActionFail("Template source files must be utf-8 encoded") + + # add ansible 'template' vars + temp_vars = task_vars.copy() + old_vars = self._templar.available_variables + + self._templar.environment.newline_sequence = newline_sequence + if block_start_string is not None: + self._templar.environment.block_start_string = block_start_string + if block_end_string is not None: + self._templar.environment.block_end_string = block_end_string + if variable_start_string is not None: + self._templar.environment.variable_start_string = variable_start_string + if variable_end_string is not None: + self._templar.environment.variable_end_string = variable_end_string + self._templar.environment.trim_blocks = 
trim_blocks + self._templar.environment.lstrip_blocks = lstrip_blocks + self._templar.available_variables = temp_vars + resultant = self._templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False) + self._templar.available_variables = old_vars + resource_definition = self._task.args.get('definition', None) + if not resource_definition: + new_module_args.pop('template') + new_module_args['definition'] = resultant + except AnsibleAction: + raise + except Exception as e: + raise AnsibleActionFail("%s: %s" % (type(e).__name__, to_text(e))) + finally: + self._loader.cleanup_tmp_file(b_tmp_source) + + # Execute the k8s_* module. + module_return = self._execute_module(module_name=self._task.action, module_args=new_module_args, task_vars=task_vars) + + # Delete tmp path + self._remove_tmp_path(self._connection._shell.tmpdir) + + result.update(module_return) + + return self._ensure_invocation(result) diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/helm_info.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/helm_info.py new file mode 100644 index 00000000..6b26225f --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/helm_info.py @@ -0,0 +1,198 @@ +# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> +# Copyright (c) 2017, Toshio Kuratomi <tkuraotmi@ansible.com> +# Copyright (c) 2020, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import copy +import traceback + +from ansible.config.manager import ensure_type +from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleAction, AnsibleActionFail +from ansible.module_utils.parsing.convert_bool import boolean +from ansible.module_utils.six import string_types +from 
ansible.module_utils._text import to_text, to_bytes, to_native +from ansible.plugins.action import ActionBase + + +class ActionModule(ActionBase): + + TRANSFERS_FILES = True + DEFAULT_NEWLINE_SEQUENCE = "\n" + + def _ensure_invocation(self, result): + # NOTE: adding invocation arguments here needs to be kept in sync with + # any no_log specified in the argument_spec in the module. + if 'invocation' not in result: + if self._play_context.no_log: + result['invocation'] = "CENSORED: no_log is set" + else: + result['invocation'] = self._task.args.copy() + result['invocation']['module_args'] = self._task.args.copy() + + return result + + def run(self, tmp=None, task_vars=None): + ''' handler for k8s options ''' + if task_vars is None: + task_vars = dict() + + result = super(ActionModule, self).run(tmp, task_vars) + del tmp # tmp no longer has any effect + + new_module_args = copy.deepcopy(self._task.args) + kubeconfig = self._task.args.get('kubeconfig', None) + # find the file in the expected search path + if kubeconfig: + try: + # find in expected paths + kubeconfig = self._find_needle('files', kubeconfig) + except AnsibleError as e: + result['failed'] = True + result['msg'] = to_text(e) + result['exception'] = traceback.format_exc() + return result + + if kubeconfig: + # decrypt kubeconfig found + actual_file = self._loader.get_real_file(kubeconfig, decrypt=True) + new_module_args['kubeconfig'] = actual_file + + # find the file in the expected search path + src = self._task.args.get('src', None) + if src: + try: + # find in expected paths + src = self._find_needle('files', src) + except AnsibleError as e: + result['failed'] = True + result['msg'] = to_text(e) + result['exception'] = traceback.format_exc() + return result + + if src: + new_module_args['src'] = src + + template = self._task.args.get('template', None) + if template: + # template is only supported by k8s module. 
+ if self._task.action not in ('k8s', 'community.kubernetes.k8s', 'community.okd.k8s'): + raise AnsibleActionFail("'template' is only supported parameter for 'k8s' module.") + if isinstance(template, string_types): + # treat this as raw_params + template_path = template + newline_sequence = self.DEFAULT_NEWLINE_SEQUENCE + variable_start_string = None + variable_end_string = None + block_start_string = None + block_end_string = None + trim_blocks = True + lstrip_blocks = False + elif isinstance(template, dict): + template_args = template + template_path = template_args.get('path', None) + if not template: + raise AnsibleActionFail("Please specify path for template.") + + # Options type validation strings + for s_type in ('newline_sequence', 'variable_start_string', 'variable_end_string', 'block_start_string', + 'block_end_string'): + if s_type in template_args: + value = ensure_type(template_args[s_type], 'string') + if value is not None and not isinstance(value, string_types): + raise AnsibleActionFail("%s is expected to be a string, but got %s instead" % (s_type, type(value))) + try: + trim_blocks = boolean(template_args.get('trim_blocks', True), strict=False) + lstrip_blocks = boolean(template_args.get('lstrip_blocks', False), strict=False) + except TypeError as e: + raise AnsibleActionFail(to_native(e)) + + newline_sequence = template_args.get('newline_sequence', self.DEFAULT_NEWLINE_SEQUENCE) + variable_start_string = template_args.get('variable_start_string', None) + variable_end_string = template_args.get('variable_end_string', None) + block_start_string = template_args.get('block_start_string', None) + block_end_string = template_args.get('block_end_string', None) + else: + raise AnsibleActionFail("Error while reading template file - " + "a string or dict for template expected, but got %s instead" % type(template)) + try: + source = self._find_needle('templates', template_path) + except AnsibleError as e: + raise AnsibleActionFail(to_text(e)) + + # Option 
`lstrip_blocks' was added in Jinja2 version 2.7. + if lstrip_blocks: + try: + import jinja2.defaults + except ImportError: + raise AnsibleError('Unable to import Jinja2 defaults for determining Jinja2 features.') + + try: + jinja2.defaults.LSTRIP_BLOCKS + except AttributeError: + raise AnsibleError("Option `lstrip_blocks' is only available in Jinja2 versions >=2.7") + + wrong_sequences = ["\\n", "\\r", "\\r\\n"] + allowed_sequences = ["\n", "\r", "\r\n"] + + # We need to convert unescaped sequences to proper escaped sequences for Jinja2 + if newline_sequence in wrong_sequences: + newline_sequence = allowed_sequences[wrong_sequences.index(newline_sequence)] + elif newline_sequence not in allowed_sequences: + raise AnsibleActionFail("newline_sequence needs to be one of: \n, \r or \r\n") + + # Get vault decrypted tmp file + try: + tmp_source = self._loader.get_real_file(source) + except AnsibleFileNotFound as e: + raise AnsibleActionFail("could not find template=%s, %s" % (source, to_text(e))) + b_tmp_source = to_bytes(tmp_source, errors='surrogate_or_strict') + + # template the source data locally & get ready to transfer + try: + with open(b_tmp_source, 'rb') as f: + try: + template_data = to_text(f.read(), errors='surrogate_or_strict') + except UnicodeError: + raise AnsibleActionFail("Template source files must be utf-8 encoded") + + # add ansible 'template' vars + temp_vars = task_vars.copy() + old_vars = self._templar.available_variables + + self._templar.environment.newline_sequence = newline_sequence + if block_start_string is not None: + self._templar.environment.block_start_string = block_start_string + if block_end_string is not None: + self._templar.environment.block_end_string = block_end_string + if variable_start_string is not None: + self._templar.environment.variable_start_string = variable_start_string + if variable_end_string is not None: + self._templar.environment.variable_end_string = variable_end_string + self._templar.environment.trim_blocks = 
trim_blocks + self._templar.environment.lstrip_blocks = lstrip_blocks + self._templar.available_variables = temp_vars + resultant = self._templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False) + self._templar.available_variables = old_vars + resource_definition = self._task.args.get('definition', None) + if not resource_definition: + new_module_args.pop('template') + new_module_args['definition'] = resultant + except AnsibleAction: + raise + except Exception as e: + raise AnsibleActionFail("%s: %s" % (type(e).__name__, to_text(e))) + finally: + self._loader.cleanup_tmp_file(b_tmp_source) + + # Execute the k8s_* module. + module_return = self._execute_module(module_name=self._task.action, module_args=new_module_args, task_vars=task_vars) + + # Delete tmp path + self._remove_tmp_path(self._connection._shell.tmpdir) + + result.update(module_return) + + return self._ensure_invocation(result) diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/helm_plugin.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/helm_plugin.py new file mode 100644 index 00000000..6b26225f --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/helm_plugin.py @@ -0,0 +1,198 @@ +# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> +# Copyright (c) 2017, Toshio Kuratomi <tkuraotmi@ansible.com> +# Copyright (c) 2020, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import copy +import traceback + +from ansible.config.manager import ensure_type +from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleAction, AnsibleActionFail +from ansible.module_utils.parsing.convert_bool import boolean +from ansible.module_utils.six import string_types +from 
ansible.module_utils._text import to_text, to_bytes, to_native +from ansible.plugins.action import ActionBase + + +class ActionModule(ActionBase): + + TRANSFERS_FILES = True + DEFAULT_NEWLINE_SEQUENCE = "\n" + + def _ensure_invocation(self, result): + # NOTE: adding invocation arguments here needs to be kept in sync with + # any no_log specified in the argument_spec in the module. + if 'invocation' not in result: + if self._play_context.no_log: + result['invocation'] = "CENSORED: no_log is set" + else: + result['invocation'] = self._task.args.copy() + result['invocation']['module_args'] = self._task.args.copy() + + return result + + def run(self, tmp=None, task_vars=None): + ''' handler for k8s options ''' + if task_vars is None: + task_vars = dict() + + result = super(ActionModule, self).run(tmp, task_vars) + del tmp # tmp no longer has any effect + + new_module_args = copy.deepcopy(self._task.args) + kubeconfig = self._task.args.get('kubeconfig', None) + # find the file in the expected search path + if kubeconfig: + try: + # find in expected paths + kubeconfig = self._find_needle('files', kubeconfig) + except AnsibleError as e: + result['failed'] = True + result['msg'] = to_text(e) + result['exception'] = traceback.format_exc() + return result + + if kubeconfig: + # decrypt kubeconfig found + actual_file = self._loader.get_real_file(kubeconfig, decrypt=True) + new_module_args['kubeconfig'] = actual_file + + # find the file in the expected search path + src = self._task.args.get('src', None) + if src: + try: + # find in expected paths + src = self._find_needle('files', src) + except AnsibleError as e: + result['failed'] = True + result['msg'] = to_text(e) + result['exception'] = traceback.format_exc() + return result + + if src: + new_module_args['src'] = src + + template = self._task.args.get('template', None) + if template: + # template is only supported by k8s module. 
+ if self._task.action not in ('k8s', 'community.kubernetes.k8s', 'community.okd.k8s'): + raise AnsibleActionFail("'template' is only supported parameter for 'k8s' module.") + if isinstance(template, string_types): + # treat this as raw_params + template_path = template + newline_sequence = self.DEFAULT_NEWLINE_SEQUENCE + variable_start_string = None + variable_end_string = None + block_start_string = None + block_end_string = None + trim_blocks = True + lstrip_blocks = False + elif isinstance(template, dict): + template_args = template + template_path = template_args.get('path', None) + if not template: + raise AnsibleActionFail("Please specify path for template.") + + # Options type validation strings + for s_type in ('newline_sequence', 'variable_start_string', 'variable_end_string', 'block_start_string', + 'block_end_string'): + if s_type in template_args: + value = ensure_type(template_args[s_type], 'string') + if value is not None and not isinstance(value, string_types): + raise AnsibleActionFail("%s is expected to be a string, but got %s instead" % (s_type, type(value))) + try: + trim_blocks = boolean(template_args.get('trim_blocks', True), strict=False) + lstrip_blocks = boolean(template_args.get('lstrip_blocks', False), strict=False) + except TypeError as e: + raise AnsibleActionFail(to_native(e)) + + newline_sequence = template_args.get('newline_sequence', self.DEFAULT_NEWLINE_SEQUENCE) + variable_start_string = template_args.get('variable_start_string', None) + variable_end_string = template_args.get('variable_end_string', None) + block_start_string = template_args.get('block_start_string', None) + block_end_string = template_args.get('block_end_string', None) + else: + raise AnsibleActionFail("Error while reading template file - " + "a string or dict for template expected, but got %s instead" % type(template)) + try: + source = self._find_needle('templates', template_path) + except AnsibleError as e: + raise AnsibleActionFail(to_text(e)) + + # Option 
`lstrip_blocks' was added in Jinja2 version 2.7. + if lstrip_blocks: + try: + import jinja2.defaults + except ImportError: + raise AnsibleError('Unable to import Jinja2 defaults for determining Jinja2 features.') + + try: + jinja2.defaults.LSTRIP_BLOCKS + except AttributeError: + raise AnsibleError("Option `lstrip_blocks' is only available in Jinja2 versions >=2.7") + + wrong_sequences = ["\\n", "\\r", "\\r\\n"] + allowed_sequences = ["\n", "\r", "\r\n"] + + # We need to convert unescaped sequences to proper escaped sequences for Jinja2 + if newline_sequence in wrong_sequences: + newline_sequence = allowed_sequences[wrong_sequences.index(newline_sequence)] + elif newline_sequence not in allowed_sequences: + raise AnsibleActionFail("newline_sequence needs to be one of: \n, \r or \r\n") + + # Get vault decrypted tmp file + try: + tmp_source = self._loader.get_real_file(source) + except AnsibleFileNotFound as e: + raise AnsibleActionFail("could not find template=%s, %s" % (source, to_text(e))) + b_tmp_source = to_bytes(tmp_source, errors='surrogate_or_strict') + + # template the source data locally & get ready to transfer + try: + with open(b_tmp_source, 'rb') as f: + try: + template_data = to_text(f.read(), errors='surrogate_or_strict') + except UnicodeError: + raise AnsibleActionFail("Template source files must be utf-8 encoded") + + # add ansible 'template' vars + temp_vars = task_vars.copy() + old_vars = self._templar.available_variables + + self._templar.environment.newline_sequence = newline_sequence + if block_start_string is not None: + self._templar.environment.block_start_string = block_start_string + if block_end_string is not None: + self._templar.environment.block_end_string = block_end_string + if variable_start_string is not None: + self._templar.environment.variable_start_string = variable_start_string + if variable_end_string is not None: + self._templar.environment.variable_end_string = variable_end_string + self._templar.environment.trim_blocks = 
trim_blocks + self._templar.environment.lstrip_blocks = lstrip_blocks + self._templar.available_variables = temp_vars + resultant = self._templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False) + self._templar.available_variables = old_vars + resource_definition = self._task.args.get('definition', None) + if not resource_definition: + new_module_args.pop('template') + new_module_args['definition'] = resultant + except AnsibleAction: + raise + except Exception as e: + raise AnsibleActionFail("%s: %s" % (type(e).__name__, to_text(e))) + finally: + self._loader.cleanup_tmp_file(b_tmp_source) + + # Execute the k8s_* module. + module_return = self._execute_module(module_name=self._task.action, module_args=new_module_args, task_vars=task_vars) + + # Delete tmp path + self._remove_tmp_path(self._connection._shell.tmpdir) + + result.update(module_return) + + return self._ensure_invocation(result) diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/helm_plugin_info.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/helm_plugin_info.py new file mode 100644 index 00000000..6b26225f --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/helm_plugin_info.py @@ -0,0 +1,198 @@ +# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> +# Copyright (c) 2017, Toshio Kuratomi <tkuraotmi@ansible.com> +# Copyright (c) 2020, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import copy +import traceback + +from ansible.config.manager import ensure_type +from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleAction, AnsibleActionFail +from ansible.module_utils.parsing.convert_bool import boolean +from ansible.module_utils.six import string_types 
+from ansible.module_utils._text import to_text, to_bytes, to_native +from ansible.plugins.action import ActionBase + + +class ActionModule(ActionBase): + + TRANSFERS_FILES = True + DEFAULT_NEWLINE_SEQUENCE = "\n" + + def _ensure_invocation(self, result): + # NOTE: adding invocation arguments here needs to be kept in sync with + # any no_log specified in the argument_spec in the module. + if 'invocation' not in result: + if self._play_context.no_log: + result['invocation'] = "CENSORED: no_log is set" + else: + result['invocation'] = self._task.args.copy() + result['invocation']['module_args'] = self._task.args.copy() + + return result + + def run(self, tmp=None, task_vars=None): + ''' handler for k8s options ''' + if task_vars is None: + task_vars = dict() + + result = super(ActionModule, self).run(tmp, task_vars) + del tmp # tmp no longer has any effect + + new_module_args = copy.deepcopy(self._task.args) + kubeconfig = self._task.args.get('kubeconfig', None) + # find the file in the expected search path + if kubeconfig: + try: + # find in expected paths + kubeconfig = self._find_needle('files', kubeconfig) + except AnsibleError as e: + result['failed'] = True + result['msg'] = to_text(e) + result['exception'] = traceback.format_exc() + return result + + if kubeconfig: + # decrypt kubeconfig found + actual_file = self._loader.get_real_file(kubeconfig, decrypt=True) + new_module_args['kubeconfig'] = actual_file + + # find the file in the expected search path + src = self._task.args.get('src', None) + if src: + try: + # find in expected paths + src = self._find_needle('files', src) + except AnsibleError as e: + result['failed'] = True + result['msg'] = to_text(e) + result['exception'] = traceback.format_exc() + return result + + if src: + new_module_args['src'] = src + + template = self._task.args.get('template', None) + if template: + # template is only supported by k8s module. 
+ if self._task.action not in ('k8s', 'community.kubernetes.k8s', 'community.okd.k8s'): + raise AnsibleActionFail("'template' is only supported parameter for 'k8s' module.") + if isinstance(template, string_types): + # treat this as raw_params + template_path = template + newline_sequence = self.DEFAULT_NEWLINE_SEQUENCE + variable_start_string = None + variable_end_string = None + block_start_string = None + block_end_string = None + trim_blocks = True + lstrip_blocks = False + elif isinstance(template, dict): + template_args = template + template_path = template_args.get('path', None) + if not template: + raise AnsibleActionFail("Please specify path for template.") + + # Options type validation strings + for s_type in ('newline_sequence', 'variable_start_string', 'variable_end_string', 'block_start_string', + 'block_end_string'): + if s_type in template_args: + value = ensure_type(template_args[s_type], 'string') + if value is not None and not isinstance(value, string_types): + raise AnsibleActionFail("%s is expected to be a string, but got %s instead" % (s_type, type(value))) + try: + trim_blocks = boolean(template_args.get('trim_blocks', True), strict=False) + lstrip_blocks = boolean(template_args.get('lstrip_blocks', False), strict=False) + except TypeError as e: + raise AnsibleActionFail(to_native(e)) + + newline_sequence = template_args.get('newline_sequence', self.DEFAULT_NEWLINE_SEQUENCE) + variable_start_string = template_args.get('variable_start_string', None) + variable_end_string = template_args.get('variable_end_string', None) + block_start_string = template_args.get('block_start_string', None) + block_end_string = template_args.get('block_end_string', None) + else: + raise AnsibleActionFail("Error while reading template file - " + "a string or dict for template expected, but got %s instead" % type(template)) + try: + source = self._find_needle('templates', template_path) + except AnsibleError as e: + raise AnsibleActionFail(to_text(e)) + + # Option 
`lstrip_blocks' was added in Jinja2 version 2.7. + if lstrip_blocks: + try: + import jinja2.defaults + except ImportError: + raise AnsibleError('Unable to import Jinja2 defaults for determining Jinja2 features.') + + try: + jinja2.defaults.LSTRIP_BLOCKS + except AttributeError: + raise AnsibleError("Option `lstrip_blocks' is only available in Jinja2 versions >=2.7") + + wrong_sequences = ["\\n", "\\r", "\\r\\n"] + allowed_sequences = ["\n", "\r", "\r\n"] + + # We need to convert unescaped sequences to proper escaped sequences for Jinja2 + if newline_sequence in wrong_sequences: + newline_sequence = allowed_sequences[wrong_sequences.index(newline_sequence)] + elif newline_sequence not in allowed_sequences: + raise AnsibleActionFail("newline_sequence needs to be one of: \n, \r or \r\n") + + # Get vault decrypted tmp file + try: + tmp_source = self._loader.get_real_file(source) + except AnsibleFileNotFound as e: + raise AnsibleActionFail("could not find template=%s, %s" % (source, to_text(e))) + b_tmp_source = to_bytes(tmp_source, errors='surrogate_or_strict') + + # template the source data locally & get ready to transfer + try: + with open(b_tmp_source, 'rb') as f: + try: + template_data = to_text(f.read(), errors='surrogate_or_strict') + except UnicodeError: + raise AnsibleActionFail("Template source files must be utf-8 encoded") + + # add ansible 'template' vars + temp_vars = task_vars.copy() + old_vars = self._templar.available_variables + + self._templar.environment.newline_sequence = newline_sequence + if block_start_string is not None: + self._templar.environment.block_start_string = block_start_string + if block_end_string is not None: + self._templar.environment.block_end_string = block_end_string + if variable_start_string is not None: + self._templar.environment.variable_start_string = variable_start_string + if variable_end_string is not None: + self._templar.environment.variable_end_string = variable_end_string + self._templar.environment.trim_blocks = 
trim_blocks + self._templar.environment.lstrip_blocks = lstrip_blocks + self._templar.available_variables = temp_vars + resultant = self._templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False) + self._templar.available_variables = old_vars + resource_definition = self._task.args.get('definition', None) + if not resource_definition: + new_module_args.pop('template') + new_module_args['definition'] = resultant + except AnsibleAction: + raise + except Exception as e: + raise AnsibleActionFail("%s: %s" % (type(e).__name__, to_text(e))) + finally: + self._loader.cleanup_tmp_file(b_tmp_source) + + # Execute the k8s_* module. + module_return = self._execute_module(module_name=self._task.action, module_args=new_module_args, task_vars=task_vars) + + # Delete tmp path + self._remove_tmp_path(self._connection._shell.tmpdir) + + result.update(module_return) + + return self._ensure_invocation(result) diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/helm_repository.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/helm_repository.py new file mode 100644 index 00000000..6b26225f --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/helm_repository.py @@ -0,0 +1,198 @@ +# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> +# Copyright (c) 2017, Toshio Kuratomi <tkuraotmi@ansible.com> +# Copyright (c) 2020, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import copy +import traceback + +from ansible.config.manager import ensure_type +from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleAction, AnsibleActionFail +from ansible.module_utils.parsing.convert_bool import boolean +from ansible.module_utils.six import string_types +from 
ansible.module_utils._text import to_text, to_bytes, to_native +from ansible.plugins.action import ActionBase + + +class ActionModule(ActionBase): + + TRANSFERS_FILES = True + DEFAULT_NEWLINE_SEQUENCE = "\n" + + def _ensure_invocation(self, result): + # NOTE: adding invocation arguments here needs to be kept in sync with + # any no_log specified in the argument_spec in the module. + if 'invocation' not in result: + if self._play_context.no_log: + result['invocation'] = "CENSORED: no_log is set" + else: + result['invocation'] = self._task.args.copy() + result['invocation']['module_args'] = self._task.args.copy() + + return result + + def run(self, tmp=None, task_vars=None): + ''' handler for k8s options ''' + if task_vars is None: + task_vars = dict() + + result = super(ActionModule, self).run(tmp, task_vars) + del tmp # tmp no longer has any effect + + new_module_args = copy.deepcopy(self._task.args) + kubeconfig = self._task.args.get('kubeconfig', None) + # find the file in the expected search path + if kubeconfig: + try: + # find in expected paths + kubeconfig = self._find_needle('files', kubeconfig) + except AnsibleError as e: + result['failed'] = True + result['msg'] = to_text(e) + result['exception'] = traceback.format_exc() + return result + + if kubeconfig: + # decrypt kubeconfig found + actual_file = self._loader.get_real_file(kubeconfig, decrypt=True) + new_module_args['kubeconfig'] = actual_file + + # find the file in the expected search path + src = self._task.args.get('src', None) + if src: + try: + # find in expected paths + src = self._find_needle('files', src) + except AnsibleError as e: + result['failed'] = True + result['msg'] = to_text(e) + result['exception'] = traceback.format_exc() + return result + + if src: + new_module_args['src'] = src + + template = self._task.args.get('template', None) + if template: + # template is only supported by k8s module. 
+ if self._task.action not in ('k8s', 'community.kubernetes.k8s', 'community.okd.k8s'): + raise AnsibleActionFail("'template' is only supported parameter for 'k8s' module.") + if isinstance(template, string_types): + # treat this as raw_params + template_path = template + newline_sequence = self.DEFAULT_NEWLINE_SEQUENCE + variable_start_string = None + variable_end_string = None + block_start_string = None + block_end_string = None + trim_blocks = True + lstrip_blocks = False + elif isinstance(template, dict): + template_args = template + template_path = template_args.get('path', None) + if not template: + raise AnsibleActionFail("Please specify path for template.") + + # Options type validation strings + for s_type in ('newline_sequence', 'variable_start_string', 'variable_end_string', 'block_start_string', + 'block_end_string'): + if s_type in template_args: + value = ensure_type(template_args[s_type], 'string') + if value is not None and not isinstance(value, string_types): + raise AnsibleActionFail("%s is expected to be a string, but got %s instead" % (s_type, type(value))) + try: + trim_blocks = boolean(template_args.get('trim_blocks', True), strict=False) + lstrip_blocks = boolean(template_args.get('lstrip_blocks', False), strict=False) + except TypeError as e: + raise AnsibleActionFail(to_native(e)) + + newline_sequence = template_args.get('newline_sequence', self.DEFAULT_NEWLINE_SEQUENCE) + variable_start_string = template_args.get('variable_start_string', None) + variable_end_string = template_args.get('variable_end_string', None) + block_start_string = template_args.get('block_start_string', None) + block_end_string = template_args.get('block_end_string', None) + else: + raise AnsibleActionFail("Error while reading template file - " + "a string or dict for template expected, but got %s instead" % type(template)) + try: + source = self._find_needle('templates', template_path) + except AnsibleError as e: + raise AnsibleActionFail(to_text(e)) + + # Option 
`lstrip_blocks' was added in Jinja2 version 2.7. + if lstrip_blocks: + try: + import jinja2.defaults + except ImportError: + raise AnsibleError('Unable to import Jinja2 defaults for determining Jinja2 features.') + + try: + jinja2.defaults.LSTRIP_BLOCKS + except AttributeError: + raise AnsibleError("Option `lstrip_blocks' is only available in Jinja2 versions >=2.7") + + wrong_sequences = ["\\n", "\\r", "\\r\\n"] + allowed_sequences = ["\n", "\r", "\r\n"] + + # We need to convert unescaped sequences to proper escaped sequences for Jinja2 + if newline_sequence in wrong_sequences: + newline_sequence = allowed_sequences[wrong_sequences.index(newline_sequence)] + elif newline_sequence not in allowed_sequences: + raise AnsibleActionFail("newline_sequence needs to be one of: \n, \r or \r\n") + + # Get vault decrypted tmp file + try: + tmp_source = self._loader.get_real_file(source) + except AnsibleFileNotFound as e: + raise AnsibleActionFail("could not find template=%s, %s" % (source, to_text(e))) + b_tmp_source = to_bytes(tmp_source, errors='surrogate_or_strict') + + # template the source data locally & get ready to transfer + try: + with open(b_tmp_source, 'rb') as f: + try: + template_data = to_text(f.read(), errors='surrogate_or_strict') + except UnicodeError: + raise AnsibleActionFail("Template source files must be utf-8 encoded") + + # add ansible 'template' vars + temp_vars = task_vars.copy() + old_vars = self._templar.available_variables + + self._templar.environment.newline_sequence = newline_sequence + if block_start_string is not None: + self._templar.environment.block_start_string = block_start_string + if block_end_string is not None: + self._templar.environment.block_end_string = block_end_string + if variable_start_string is not None: + self._templar.environment.variable_start_string = variable_start_string + if variable_end_string is not None: + self._templar.environment.variable_end_string = variable_end_string + self._templar.environment.trim_blocks = 
trim_blocks + self._templar.environment.lstrip_blocks = lstrip_blocks + self._templar.available_variables = temp_vars + resultant = self._templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False) + self._templar.available_variables = old_vars + resource_definition = self._task.args.get('definition', None) + if not resource_definition: + new_module_args.pop('template') + new_module_args['definition'] = resultant + except AnsibleAction: + raise + except Exception as e: + raise AnsibleActionFail("%s: %s" % (type(e).__name__, to_text(e))) + finally: + self._loader.cleanup_tmp_file(b_tmp_source) + + # Execute the k8s_* module. + module_return = self._execute_module(module_name=self._task.action, module_args=new_module_args, task_vars=task_vars) + + # Delete tmp path + self._remove_tmp_path(self._connection._shell.tmpdir) + + result.update(module_return) + + return self._ensure_invocation(result) diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s.py new file mode 100644 index 00000000..6b26225f --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s.py @@ -0,0 +1,198 @@ +# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> +# Copyright (c) 2017, Toshio Kuratomi <tkuraotmi@ansible.com> +# Copyright (c) 2020, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import copy +import traceback + +from ansible.config.manager import ensure_type +from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleAction, AnsibleActionFail +from ansible.module_utils.parsing.convert_bool import boolean +from ansible.module_utils.six import string_types +from ansible.module_utils._text import 
to_text, to_bytes, to_native +from ansible.plugins.action import ActionBase + + +class ActionModule(ActionBase): + + TRANSFERS_FILES = True + DEFAULT_NEWLINE_SEQUENCE = "\n" + + def _ensure_invocation(self, result): + # NOTE: adding invocation arguments here needs to be kept in sync with + # any no_log specified in the argument_spec in the module. + if 'invocation' not in result: + if self._play_context.no_log: + result['invocation'] = "CENSORED: no_log is set" + else: + result['invocation'] = self._task.args.copy() + result['invocation']['module_args'] = self._task.args.copy() + + return result + + def run(self, tmp=None, task_vars=None): + ''' handler for k8s options ''' + if task_vars is None: + task_vars = dict() + + result = super(ActionModule, self).run(tmp, task_vars) + del tmp # tmp no longer has any effect + + new_module_args = copy.deepcopy(self._task.args) + kubeconfig = self._task.args.get('kubeconfig', None) + # find the file in the expected search path + if kubeconfig: + try: + # find in expected paths + kubeconfig = self._find_needle('files', kubeconfig) + except AnsibleError as e: + result['failed'] = True + result['msg'] = to_text(e) + result['exception'] = traceback.format_exc() + return result + + if kubeconfig: + # decrypt kubeconfig found + actual_file = self._loader.get_real_file(kubeconfig, decrypt=True) + new_module_args['kubeconfig'] = actual_file + + # find the file in the expected search path + src = self._task.args.get('src', None) + if src: + try: + # find in expected paths + src = self._find_needle('files', src) + except AnsibleError as e: + result['failed'] = True + result['msg'] = to_text(e) + result['exception'] = traceback.format_exc() + return result + + if src: + new_module_args['src'] = src + + template = self._task.args.get('template', None) + if template: + # template is only supported by k8s module. 
+ if self._task.action not in ('k8s', 'community.kubernetes.k8s', 'community.okd.k8s'): + raise AnsibleActionFail("'template' is only supported parameter for 'k8s' module.") + if isinstance(template, string_types): + # treat this as raw_params + template_path = template + newline_sequence = self.DEFAULT_NEWLINE_SEQUENCE + variable_start_string = None + variable_end_string = None + block_start_string = None + block_end_string = None + trim_blocks = True + lstrip_blocks = False + elif isinstance(template, dict): + template_args = template + template_path = template_args.get('path', None) + if not template: + raise AnsibleActionFail("Please specify path for template.") + + # Options type validation strings + for s_type in ('newline_sequence', 'variable_start_string', 'variable_end_string', 'block_start_string', + 'block_end_string'): + if s_type in template_args: + value = ensure_type(template_args[s_type], 'string') + if value is not None and not isinstance(value, string_types): + raise AnsibleActionFail("%s is expected to be a string, but got %s instead" % (s_type, type(value))) + try: + trim_blocks = boolean(template_args.get('trim_blocks', True), strict=False) + lstrip_blocks = boolean(template_args.get('lstrip_blocks', False), strict=False) + except TypeError as e: + raise AnsibleActionFail(to_native(e)) + + newline_sequence = template_args.get('newline_sequence', self.DEFAULT_NEWLINE_SEQUENCE) + variable_start_string = template_args.get('variable_start_string', None) + variable_end_string = template_args.get('variable_end_string', None) + block_start_string = template_args.get('block_start_string', None) + block_end_string = template_args.get('block_end_string', None) + else: + raise AnsibleActionFail("Error while reading template file - " + "a string or dict for template expected, but got %s instead" % type(template)) + try: + source = self._find_needle('templates', template_path) + except AnsibleError as e: + raise AnsibleActionFail(to_text(e)) + + # Option 
`lstrip_blocks' was added in Jinja2 version 2.7. + if lstrip_blocks: + try: + import jinja2.defaults + except ImportError: + raise AnsibleError('Unable to import Jinja2 defaults for determining Jinja2 features.') + + try: + jinja2.defaults.LSTRIP_BLOCKS + except AttributeError: + raise AnsibleError("Option `lstrip_blocks' is only available in Jinja2 versions >=2.7") + + wrong_sequences = ["\\n", "\\r", "\\r\\n"] + allowed_sequences = ["\n", "\r", "\r\n"] + + # We need to convert unescaped sequences to proper escaped sequences for Jinja2 + if newline_sequence in wrong_sequences: + newline_sequence = allowed_sequences[wrong_sequences.index(newline_sequence)] + elif newline_sequence not in allowed_sequences: + raise AnsibleActionFail("newline_sequence needs to be one of: \n, \r or \r\n") + + # Get vault decrypted tmp file + try: + tmp_source = self._loader.get_real_file(source) + except AnsibleFileNotFound as e: + raise AnsibleActionFail("could not find template=%s, %s" % (source, to_text(e))) + b_tmp_source = to_bytes(tmp_source, errors='surrogate_or_strict') + + # template the source data locally & get ready to transfer + try: + with open(b_tmp_source, 'rb') as f: + try: + template_data = to_text(f.read(), errors='surrogate_or_strict') + except UnicodeError: + raise AnsibleActionFail("Template source files must be utf-8 encoded") + + # add ansible 'template' vars + temp_vars = task_vars.copy() + old_vars = self._templar.available_variables + + self._templar.environment.newline_sequence = newline_sequence + if block_start_string is not None: + self._templar.environment.block_start_string = block_start_string + if block_end_string is not None: + self._templar.environment.block_end_string = block_end_string + if variable_start_string is not None: + self._templar.environment.variable_start_string = variable_start_string + if variable_end_string is not None: + self._templar.environment.variable_end_string = variable_end_string + self._templar.environment.trim_blocks = 
trim_blocks + self._templar.environment.lstrip_blocks = lstrip_blocks + self._templar.available_variables = temp_vars + resultant = self._templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False) + self._templar.available_variables = old_vars + resource_definition = self._task.args.get('definition', None) + if not resource_definition: + new_module_args.pop('template') + new_module_args['definition'] = resultant + except AnsibleAction: + raise + except Exception as e: + raise AnsibleActionFail("%s: %s" % (type(e).__name__, to_text(e))) + finally: + self._loader.cleanup_tmp_file(b_tmp_source) + + # Execute the k8s_* module. + module_return = self._execute_module(module_name=self._task.action, module_args=new_module_args, task_vars=task_vars) + + # Delete tmp path + self._remove_tmp_path(self._connection._shell.tmpdir) + + result.update(module_return) + + return self._ensure_invocation(result) diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_auth.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_auth.py new file mode 100644 index 00000000..6b26225f --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_auth.py @@ -0,0 +1,198 @@ +# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> +# Copyright (c) 2017, Toshio Kuratomi <tkuraotmi@ansible.com> +# Copyright (c) 2020, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import copy +import traceback + +from ansible.config.manager import ensure_type +from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleAction, AnsibleActionFail +from ansible.module_utils.parsing.convert_bool import boolean +from ansible.module_utils.six import string_types +from 
ansible.module_utils._text import to_text, to_bytes, to_native +from ansible.plugins.action import ActionBase + + +class ActionModule(ActionBase): + + TRANSFERS_FILES = True + DEFAULT_NEWLINE_SEQUENCE = "\n" + + def _ensure_invocation(self, result): + # NOTE: adding invocation arguments here needs to be kept in sync with + # any no_log specified in the argument_spec in the module. + if 'invocation' not in result: + if self._play_context.no_log: + result['invocation'] = "CENSORED: no_log is set" + else: + result['invocation'] = self._task.args.copy() + result['invocation']['module_args'] = self._task.args.copy() + + return result + + def run(self, tmp=None, task_vars=None): + ''' handler for k8s options ''' + if task_vars is None: + task_vars = dict() + + result = super(ActionModule, self).run(tmp, task_vars) + del tmp # tmp no longer has any effect + + new_module_args = copy.deepcopy(self._task.args) + kubeconfig = self._task.args.get('kubeconfig', None) + # find the file in the expected search path + if kubeconfig: + try: + # find in expected paths + kubeconfig = self._find_needle('files', kubeconfig) + except AnsibleError as e: + result['failed'] = True + result['msg'] = to_text(e) + result['exception'] = traceback.format_exc() + return result + + if kubeconfig: + # decrypt kubeconfig found + actual_file = self._loader.get_real_file(kubeconfig, decrypt=True) + new_module_args['kubeconfig'] = actual_file + + # find the file in the expected search path + src = self._task.args.get('src', None) + if src: + try: + # find in expected paths + src = self._find_needle('files', src) + except AnsibleError as e: + result['failed'] = True + result['msg'] = to_text(e) + result['exception'] = traceback.format_exc() + return result + + if src: + new_module_args['src'] = src + + template = self._task.args.get('template', None) + if template: + # template is only supported by k8s module. 
+ if self._task.action not in ('k8s', 'community.kubernetes.k8s', 'community.okd.k8s'): + raise AnsibleActionFail("'template' is only supported parameter for 'k8s' module.") + if isinstance(template, string_types): + # treat this as raw_params + template_path = template + newline_sequence = self.DEFAULT_NEWLINE_SEQUENCE + variable_start_string = None + variable_end_string = None + block_start_string = None + block_end_string = None + trim_blocks = True + lstrip_blocks = False + elif isinstance(template, dict): + template_args = template + template_path = template_args.get('path', None) + if not template: + raise AnsibleActionFail("Please specify path for template.") + + # Options type validation strings + for s_type in ('newline_sequence', 'variable_start_string', 'variable_end_string', 'block_start_string', + 'block_end_string'): + if s_type in template_args: + value = ensure_type(template_args[s_type], 'string') + if value is not None and not isinstance(value, string_types): + raise AnsibleActionFail("%s is expected to be a string, but got %s instead" % (s_type, type(value))) + try: + trim_blocks = boolean(template_args.get('trim_blocks', True), strict=False) + lstrip_blocks = boolean(template_args.get('lstrip_blocks', False), strict=False) + except TypeError as e: + raise AnsibleActionFail(to_native(e)) + + newline_sequence = template_args.get('newline_sequence', self.DEFAULT_NEWLINE_SEQUENCE) + variable_start_string = template_args.get('variable_start_string', None) + variable_end_string = template_args.get('variable_end_string', None) + block_start_string = template_args.get('block_start_string', None) + block_end_string = template_args.get('block_end_string', None) + else: + raise AnsibleActionFail("Error while reading template file - " + "a string or dict for template expected, but got %s instead" % type(template)) + try: + source = self._find_needle('templates', template_path) + except AnsibleError as e: + raise AnsibleActionFail(to_text(e)) + + # Option 
`lstrip_blocks' was added in Jinja2 version 2.7. + if lstrip_blocks: + try: + import jinja2.defaults + except ImportError: + raise AnsibleError('Unable to import Jinja2 defaults for determining Jinja2 features.') + + try: + jinja2.defaults.LSTRIP_BLOCKS + except AttributeError: + raise AnsibleError("Option `lstrip_blocks' is only available in Jinja2 versions >=2.7") + + wrong_sequences = ["\\n", "\\r", "\\r\\n"] + allowed_sequences = ["\n", "\r", "\r\n"] + + # We need to convert unescaped sequences to proper escaped sequences for Jinja2 + if newline_sequence in wrong_sequences: + newline_sequence = allowed_sequences[wrong_sequences.index(newline_sequence)] + elif newline_sequence not in allowed_sequences: + raise AnsibleActionFail("newline_sequence needs to be one of: \n, \r or \r\n") + + # Get vault decrypted tmp file + try: + tmp_source = self._loader.get_real_file(source) + except AnsibleFileNotFound as e: + raise AnsibleActionFail("could not find template=%s, %s" % (source, to_text(e))) + b_tmp_source = to_bytes(tmp_source, errors='surrogate_or_strict') + + # template the source data locally & get ready to transfer + try: + with open(b_tmp_source, 'rb') as f: + try: + template_data = to_text(f.read(), errors='surrogate_or_strict') + except UnicodeError: + raise AnsibleActionFail("Template source files must be utf-8 encoded") + + # add ansible 'template' vars + temp_vars = task_vars.copy() + old_vars = self._templar.available_variables + + self._templar.environment.newline_sequence = newline_sequence + if block_start_string is not None: + self._templar.environment.block_start_string = block_start_string + if block_end_string is not None: + self._templar.environment.block_end_string = block_end_string + if variable_start_string is not None: + self._templar.environment.variable_start_string = variable_start_string + if variable_end_string is not None: + self._templar.environment.variable_end_string = variable_end_string + self._templar.environment.trim_blocks = 
trim_blocks + self._templar.environment.lstrip_blocks = lstrip_blocks + self._templar.available_variables = temp_vars + resultant = self._templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False) + self._templar.available_variables = old_vars + resource_definition = self._task.args.get('definition', None) + if not resource_definition: + new_module_args.pop('template') + new_module_args['definition'] = resultant + except AnsibleAction: + raise + except Exception as e: + raise AnsibleActionFail("%s: %s" % (type(e).__name__, to_text(e))) + finally: + self._loader.cleanup_tmp_file(b_tmp_source) + + # Execute the k8s_* module. + module_return = self._execute_module(module_name=self._task.action, module_args=new_module_args, task_vars=task_vars) + + # Delete tmp path + self._remove_tmp_path(self._connection._shell.tmpdir) + + result.update(module_return) + + return self._ensure_invocation(result) diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_exec.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_exec.py new file mode 100644 index 00000000..6b26225f --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_exec.py @@ -0,0 +1,198 @@ +# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> +# Copyright (c) 2017, Toshio Kuratomi <tkuraotmi@ansible.com> +# Copyright (c) 2020, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import copy +import traceback + +from ansible.config.manager import ensure_type +from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleAction, AnsibleActionFail +from ansible.module_utils.parsing.convert_bool import boolean +from ansible.module_utils.six import string_types +from 
ansible.module_utils._text import to_text, to_bytes, to_native +from ansible.plugins.action import ActionBase + + +class ActionModule(ActionBase): + + TRANSFERS_FILES = True + DEFAULT_NEWLINE_SEQUENCE = "\n" + + def _ensure_invocation(self, result): + # NOTE: adding invocation arguments here needs to be kept in sync with + # any no_log specified in the argument_spec in the module. + if 'invocation' not in result: + if self._play_context.no_log: + result['invocation'] = "CENSORED: no_log is set" + else: + result['invocation'] = self._task.args.copy() + result['invocation']['module_args'] = self._task.args.copy() + + return result + + def run(self, tmp=None, task_vars=None): + ''' handler for k8s options ''' + if task_vars is None: + task_vars = dict() + + result = super(ActionModule, self).run(tmp, task_vars) + del tmp # tmp no longer has any effect + + new_module_args = copy.deepcopy(self._task.args) + kubeconfig = self._task.args.get('kubeconfig', None) + # find the file in the expected search path + if kubeconfig: + try: + # find in expected paths + kubeconfig = self._find_needle('files', kubeconfig) + except AnsibleError as e: + result['failed'] = True + result['msg'] = to_text(e) + result['exception'] = traceback.format_exc() + return result + + if kubeconfig: + # decrypt kubeconfig found + actual_file = self._loader.get_real_file(kubeconfig, decrypt=True) + new_module_args['kubeconfig'] = actual_file + + # find the file in the expected search path + src = self._task.args.get('src', None) + if src: + try: + # find in expected paths + src = self._find_needle('files', src) + except AnsibleError as e: + result['failed'] = True + result['msg'] = to_text(e) + result['exception'] = traceback.format_exc() + return result + + if src: + new_module_args['src'] = src + + template = self._task.args.get('template', None) + if template: + # template is only supported by k8s module. 
+ if self._task.action not in ('k8s', 'community.kubernetes.k8s', 'community.okd.k8s'): + raise AnsibleActionFail("'template' is only supported parameter for 'k8s' module.") + if isinstance(template, string_types): + # treat this as raw_params + template_path = template + newline_sequence = self.DEFAULT_NEWLINE_SEQUENCE + variable_start_string = None + variable_end_string = None + block_start_string = None + block_end_string = None + trim_blocks = True + lstrip_blocks = False + elif isinstance(template, dict): + template_args = template + template_path = template_args.get('path', None) + if not template: + raise AnsibleActionFail("Please specify path for template.") + + # Options type validation strings + for s_type in ('newline_sequence', 'variable_start_string', 'variable_end_string', 'block_start_string', + 'block_end_string'): + if s_type in template_args: + value = ensure_type(template_args[s_type], 'string') + if value is not None and not isinstance(value, string_types): + raise AnsibleActionFail("%s is expected to be a string, but got %s instead" % (s_type, type(value))) + try: + trim_blocks = boolean(template_args.get('trim_blocks', True), strict=False) + lstrip_blocks = boolean(template_args.get('lstrip_blocks', False), strict=False) + except TypeError as e: + raise AnsibleActionFail(to_native(e)) + + newline_sequence = template_args.get('newline_sequence', self.DEFAULT_NEWLINE_SEQUENCE) + variable_start_string = template_args.get('variable_start_string', None) + variable_end_string = template_args.get('variable_end_string', None) + block_start_string = template_args.get('block_start_string', None) + block_end_string = template_args.get('block_end_string', None) + else: + raise AnsibleActionFail("Error while reading template file - " + "a string or dict for template expected, but got %s instead" % type(template)) + try: + source = self._find_needle('templates', template_path) + except AnsibleError as e: + raise AnsibleActionFail(to_text(e)) + + # Option 
`lstrip_blocks' was added in Jinja2 version 2.7. + if lstrip_blocks: + try: + import jinja2.defaults + except ImportError: + raise AnsibleError('Unable to import Jinja2 defaults for determining Jinja2 features.') + + try: + jinja2.defaults.LSTRIP_BLOCKS + except AttributeError: + raise AnsibleError("Option `lstrip_blocks' is only available in Jinja2 versions >=2.7") + + wrong_sequences = ["\\n", "\\r", "\\r\\n"] + allowed_sequences = ["\n", "\r", "\r\n"] + + # We need to convert unescaped sequences to proper escaped sequences for Jinja2 + if newline_sequence in wrong_sequences: + newline_sequence = allowed_sequences[wrong_sequences.index(newline_sequence)] + elif newline_sequence not in allowed_sequences: + raise AnsibleActionFail("newline_sequence needs to be one of: \n, \r or \r\n") + + # Get vault decrypted tmp file + try: + tmp_source = self._loader.get_real_file(source) + except AnsibleFileNotFound as e: + raise AnsibleActionFail("could not find template=%s, %s" % (source, to_text(e))) + b_tmp_source = to_bytes(tmp_source, errors='surrogate_or_strict') + + # template the source data locally & get ready to transfer + try: + with open(b_tmp_source, 'rb') as f: + try: + template_data = to_text(f.read(), errors='surrogate_or_strict') + except UnicodeError: + raise AnsibleActionFail("Template source files must be utf-8 encoded") + + # add ansible 'template' vars + temp_vars = task_vars.copy() + old_vars = self._templar.available_variables + + self._templar.environment.newline_sequence = newline_sequence + if block_start_string is not None: + self._templar.environment.block_start_string = block_start_string + if block_end_string is not None: + self._templar.environment.block_end_string = block_end_string + if variable_start_string is not None: + self._templar.environment.variable_start_string = variable_start_string + if variable_end_string is not None: + self._templar.environment.variable_end_string = variable_end_string + self._templar.environment.trim_blocks = 
trim_blocks + self._templar.environment.lstrip_blocks = lstrip_blocks + self._templar.available_variables = temp_vars + resultant = self._templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False) + self._templar.available_variables = old_vars + resource_definition = self._task.args.get('definition', None) + if not resource_definition: + new_module_args.pop('template') + new_module_args['definition'] = resultant + except AnsibleAction: + raise + except Exception as e: + raise AnsibleActionFail("%s: %s" % (type(e).__name__, to_text(e))) + finally: + self._loader.cleanup_tmp_file(b_tmp_source) + + # Execute the k8s_* module. + module_return = self._execute_module(module_name=self._task.action, module_args=new_module_args, task_vars=task_vars) + + # Delete tmp path + self._remove_tmp_path(self._connection._shell.tmpdir) + + result.update(module_return) + + return self._ensure_invocation(result) diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_info.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_info.py new file mode 100644 index 00000000..6b26225f --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_info.py @@ -0,0 +1,198 @@ +# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> +# Copyright (c) 2017, Toshio Kuratomi <tkuraotmi@ansible.com> +# Copyright (c) 2020, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import copy +import traceback + +from ansible.config.manager import ensure_type +from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleAction, AnsibleActionFail +from ansible.module_utils.parsing.convert_bool import boolean +from ansible.module_utils.six import string_types +from 
ansible.module_utils._text import to_text, to_bytes, to_native +from ansible.plugins.action import ActionBase + + +class ActionModule(ActionBase): + + TRANSFERS_FILES = True + DEFAULT_NEWLINE_SEQUENCE = "\n" + + def _ensure_invocation(self, result): + # NOTE: adding invocation arguments here needs to be kept in sync with + # any no_log specified in the argument_spec in the module. + if 'invocation' not in result: + if self._play_context.no_log: + result['invocation'] = "CENSORED: no_log is set" + else: + result['invocation'] = self._task.args.copy() + result['invocation']['module_args'] = self._task.args.copy() + + return result + + def run(self, tmp=None, task_vars=None): + ''' handler for k8s options ''' + if task_vars is None: + task_vars = dict() + + result = super(ActionModule, self).run(tmp, task_vars) + del tmp # tmp no longer has any effect + + new_module_args = copy.deepcopy(self._task.args) + kubeconfig = self._task.args.get('kubeconfig', None) + # find the file in the expected search path + if kubeconfig: + try: + # find in expected paths + kubeconfig = self._find_needle('files', kubeconfig) + except AnsibleError as e: + result['failed'] = True + result['msg'] = to_text(e) + result['exception'] = traceback.format_exc() + return result + + if kubeconfig: + # decrypt kubeconfig found + actual_file = self._loader.get_real_file(kubeconfig, decrypt=True) + new_module_args['kubeconfig'] = actual_file + + # find the file in the expected search path + src = self._task.args.get('src', None) + if src: + try: + # find in expected paths + src = self._find_needle('files', src) + except AnsibleError as e: + result['failed'] = True + result['msg'] = to_text(e) + result['exception'] = traceback.format_exc() + return result + + if src: + new_module_args['src'] = src + + template = self._task.args.get('template', None) + if template: + # template is only supported by k8s module. 
+ if self._task.action not in ('k8s', 'community.kubernetes.k8s', 'community.okd.k8s'): + raise AnsibleActionFail("'template' is only supported parameter for 'k8s' module.") + if isinstance(template, string_types): + # treat this as raw_params + template_path = template + newline_sequence = self.DEFAULT_NEWLINE_SEQUENCE + variable_start_string = None + variable_end_string = None + block_start_string = None + block_end_string = None + trim_blocks = True + lstrip_blocks = False + elif isinstance(template, dict): + template_args = template + template_path = template_args.get('path', None) + if not template: + raise AnsibleActionFail("Please specify path for template.") + + # Options type validation strings + for s_type in ('newline_sequence', 'variable_start_string', 'variable_end_string', 'block_start_string', + 'block_end_string'): + if s_type in template_args: + value = ensure_type(template_args[s_type], 'string') + if value is not None and not isinstance(value, string_types): + raise AnsibleActionFail("%s is expected to be a string, but got %s instead" % (s_type, type(value))) + try: + trim_blocks = boolean(template_args.get('trim_blocks', True), strict=False) + lstrip_blocks = boolean(template_args.get('lstrip_blocks', False), strict=False) + except TypeError as e: + raise AnsibleActionFail(to_native(e)) + + newline_sequence = template_args.get('newline_sequence', self.DEFAULT_NEWLINE_SEQUENCE) + variable_start_string = template_args.get('variable_start_string', None) + variable_end_string = template_args.get('variable_end_string', None) + block_start_string = template_args.get('block_start_string', None) + block_end_string = template_args.get('block_end_string', None) + else: + raise AnsibleActionFail("Error while reading template file - " + "a string or dict for template expected, but got %s instead" % type(template)) + try: + source = self._find_needle('templates', template_path) + except AnsibleError as e: + raise AnsibleActionFail(to_text(e)) + + # Option 
`lstrip_blocks' was added in Jinja2 version 2.7. + if lstrip_blocks: + try: + import jinja2.defaults + except ImportError: + raise AnsibleError('Unable to import Jinja2 defaults for determining Jinja2 features.') + + try: + jinja2.defaults.LSTRIP_BLOCKS + except AttributeError: + raise AnsibleError("Option `lstrip_blocks' is only available in Jinja2 versions >=2.7") + + wrong_sequences = ["\\n", "\\r", "\\r\\n"] + allowed_sequences = ["\n", "\r", "\r\n"] + + # We need to convert unescaped sequences to proper escaped sequences for Jinja2 + if newline_sequence in wrong_sequences: + newline_sequence = allowed_sequences[wrong_sequences.index(newline_sequence)] + elif newline_sequence not in allowed_sequences: + raise AnsibleActionFail("newline_sequence needs to be one of: \n, \r or \r\n") + + # Get vault decrypted tmp file + try: + tmp_source = self._loader.get_real_file(source) + except AnsibleFileNotFound as e: + raise AnsibleActionFail("could not find template=%s, %s" % (source, to_text(e))) + b_tmp_source = to_bytes(tmp_source, errors='surrogate_or_strict') + + # template the source data locally & get ready to transfer + try: + with open(b_tmp_source, 'rb') as f: + try: + template_data = to_text(f.read(), errors='surrogate_or_strict') + except UnicodeError: + raise AnsibleActionFail("Template source files must be utf-8 encoded") + + # add ansible 'template' vars + temp_vars = task_vars.copy() + old_vars = self._templar.available_variables + + self._templar.environment.newline_sequence = newline_sequence + if block_start_string is not None: + self._templar.environment.block_start_string = block_start_string + if block_end_string is not None: + self._templar.environment.block_end_string = block_end_string + if variable_start_string is not None: + self._templar.environment.variable_start_string = variable_start_string + if variable_end_string is not None: + self._templar.environment.variable_end_string = variable_end_string + self._templar.environment.trim_blocks = 
trim_blocks + self._templar.environment.lstrip_blocks = lstrip_blocks + self._templar.available_variables = temp_vars + resultant = self._templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False) + self._templar.available_variables = old_vars + resource_definition = self._task.args.get('definition', None) + if not resource_definition: + new_module_args.pop('template') + new_module_args['definition'] = resultant + except AnsibleAction: + raise + except Exception as e: + raise AnsibleActionFail("%s: %s" % (type(e).__name__, to_text(e))) + finally: + self._loader.cleanup_tmp_file(b_tmp_source) + + # Execute the k8s_* module. + module_return = self._execute_module(module_name=self._task.action, module_args=new_module_args, task_vars=task_vars) + + # Delete tmp path + self._remove_tmp_path(self._connection._shell.tmpdir) + + result.update(module_return) + + return self._ensure_invocation(result) diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_log.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_log.py new file mode 100644 index 00000000..6b26225f --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_log.py @@ -0,0 +1,198 @@ +# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> +# Copyright (c) 2017, Toshio Kuratomi <tkuraotmi@ansible.com> +# Copyright (c) 2020, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import copy +import traceback + +from ansible.config.manager import ensure_type +from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleAction, AnsibleActionFail +from ansible.module_utils.parsing.convert_bool import boolean +from ansible.module_utils.six import string_types +from 
ansible.module_utils._text import to_text, to_bytes, to_native +from ansible.plugins.action import ActionBase + + +class ActionModule(ActionBase): + + TRANSFERS_FILES = True + DEFAULT_NEWLINE_SEQUENCE = "\n" + + def _ensure_invocation(self, result): + # NOTE: adding invocation arguments here needs to be kept in sync with + # any no_log specified in the argument_spec in the module. + if 'invocation' not in result: + if self._play_context.no_log: + result['invocation'] = "CENSORED: no_log is set" + else: + result['invocation'] = self._task.args.copy() + result['invocation']['module_args'] = self._task.args.copy() + + return result + + def run(self, tmp=None, task_vars=None): + ''' handler for k8s options ''' + if task_vars is None: + task_vars = dict() + + result = super(ActionModule, self).run(tmp, task_vars) + del tmp # tmp no longer has any effect + + new_module_args = copy.deepcopy(self._task.args) + kubeconfig = self._task.args.get('kubeconfig', None) + # find the file in the expected search path + if kubeconfig: + try: + # find in expected paths + kubeconfig = self._find_needle('files', kubeconfig) + except AnsibleError as e: + result['failed'] = True + result['msg'] = to_text(e) + result['exception'] = traceback.format_exc() + return result + + if kubeconfig: + # decrypt kubeconfig found + actual_file = self._loader.get_real_file(kubeconfig, decrypt=True) + new_module_args['kubeconfig'] = actual_file + + # find the file in the expected search path + src = self._task.args.get('src', None) + if src: + try: + # find in expected paths + src = self._find_needle('files', src) + except AnsibleError as e: + result['failed'] = True + result['msg'] = to_text(e) + result['exception'] = traceback.format_exc() + return result + + if src: + new_module_args['src'] = src + + template = self._task.args.get('template', None) + if template: + # template is only supported by k8s module. 
+ if self._task.action not in ('k8s', 'community.kubernetes.k8s', 'community.okd.k8s'): + raise AnsibleActionFail("'template' is only supported parameter for 'k8s' module.") + if isinstance(template, string_types): + # treat this as raw_params + template_path = template + newline_sequence = self.DEFAULT_NEWLINE_SEQUENCE + variable_start_string = None + variable_end_string = None + block_start_string = None + block_end_string = None + trim_blocks = True + lstrip_blocks = False + elif isinstance(template, dict): + template_args = template + template_path = template_args.get('path', None) + if not template: + raise AnsibleActionFail("Please specify path for template.") + + # Options type validation strings + for s_type in ('newline_sequence', 'variable_start_string', 'variable_end_string', 'block_start_string', + 'block_end_string'): + if s_type in template_args: + value = ensure_type(template_args[s_type], 'string') + if value is not None and not isinstance(value, string_types): + raise AnsibleActionFail("%s is expected to be a string, but got %s instead" % (s_type, type(value))) + try: + trim_blocks = boolean(template_args.get('trim_blocks', True), strict=False) + lstrip_blocks = boolean(template_args.get('lstrip_blocks', False), strict=False) + except TypeError as e: + raise AnsibleActionFail(to_native(e)) + + newline_sequence = template_args.get('newline_sequence', self.DEFAULT_NEWLINE_SEQUENCE) + variable_start_string = template_args.get('variable_start_string', None) + variable_end_string = template_args.get('variable_end_string', None) + block_start_string = template_args.get('block_start_string', None) + block_end_string = template_args.get('block_end_string', None) + else: + raise AnsibleActionFail("Error while reading template file - " + "a string or dict for template expected, but got %s instead" % type(template)) + try: + source = self._find_needle('templates', template_path) + except AnsibleError as e: + raise AnsibleActionFail(to_text(e)) + + # Option 
`lstrip_blocks' was added in Jinja2 version 2.7. + if lstrip_blocks: + try: + import jinja2.defaults + except ImportError: + raise AnsibleError('Unable to import Jinja2 defaults for determining Jinja2 features.') + + try: + jinja2.defaults.LSTRIP_BLOCKS + except AttributeError: + raise AnsibleError("Option `lstrip_blocks' is only available in Jinja2 versions >=2.7") + + wrong_sequences = ["\\n", "\\r", "\\r\\n"] + allowed_sequences = ["\n", "\r", "\r\n"] + + # We need to convert unescaped sequences to proper escaped sequences for Jinja2 + if newline_sequence in wrong_sequences: + newline_sequence = allowed_sequences[wrong_sequences.index(newline_sequence)] + elif newline_sequence not in allowed_sequences: + raise AnsibleActionFail("newline_sequence needs to be one of: \n, \r or \r\n") + + # Get vault decrypted tmp file + try: + tmp_source = self._loader.get_real_file(source) + except AnsibleFileNotFound as e: + raise AnsibleActionFail("could not find template=%s, %s" % (source, to_text(e))) + b_tmp_source = to_bytes(tmp_source, errors='surrogate_or_strict') + + # template the source data locally & get ready to transfer + try: + with open(b_tmp_source, 'rb') as f: + try: + template_data = to_text(f.read(), errors='surrogate_or_strict') + except UnicodeError: + raise AnsibleActionFail("Template source files must be utf-8 encoded") + + # add ansible 'template' vars + temp_vars = task_vars.copy() + old_vars = self._templar.available_variables + + self._templar.environment.newline_sequence = newline_sequence + if block_start_string is not None: + self._templar.environment.block_start_string = block_start_string + if block_end_string is not None: + self._templar.environment.block_end_string = block_end_string + if variable_start_string is not None: + self._templar.environment.variable_start_string = variable_start_string + if variable_end_string is not None: + self._templar.environment.variable_end_string = variable_end_string + self._templar.environment.trim_blocks = 
trim_blocks + self._templar.environment.lstrip_blocks = lstrip_blocks + self._templar.available_variables = temp_vars + resultant = self._templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False) + self._templar.available_variables = old_vars + resource_definition = self._task.args.get('definition', None) + if not resource_definition: + new_module_args.pop('template') + new_module_args['definition'] = resultant + except AnsibleAction: + raise + except Exception as e: + raise AnsibleActionFail("%s: %s" % (type(e).__name__, to_text(e))) + finally: + self._loader.cleanup_tmp_file(b_tmp_source) + + # Execute the k8s_* module. + module_return = self._execute_module(module_name=self._task.action, module_args=new_module_args, task_vars=task_vars) + + # Delete tmp path + self._remove_tmp_path(self._connection._shell.tmpdir) + + result.update(module_return) + + return self._ensure_invocation(result) diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_scale.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_scale.py new file mode 100644 index 00000000..6b26225f --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_scale.py @@ -0,0 +1,198 @@ +# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> +# Copyright (c) 2017, Toshio Kuratomi <tkuraotmi@ansible.com> +# Copyright (c) 2020, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import copy +import traceback + +from ansible.config.manager import ensure_type +from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleAction, AnsibleActionFail +from ansible.module_utils.parsing.convert_bool import boolean +from ansible.module_utils.six import string_types +from 
ansible.module_utils._text import to_text, to_bytes, to_native +from ansible.plugins.action import ActionBase + + +class ActionModule(ActionBase): + + TRANSFERS_FILES = True + DEFAULT_NEWLINE_SEQUENCE = "\n" + + def _ensure_invocation(self, result): + # NOTE: adding invocation arguments here needs to be kept in sync with + # any no_log specified in the argument_spec in the module. + if 'invocation' not in result: + if self._play_context.no_log: + result['invocation'] = "CENSORED: no_log is set" + else: + result['invocation'] = self._task.args.copy() + result['invocation']['module_args'] = self._task.args.copy() + + return result + + def run(self, tmp=None, task_vars=None): + ''' handler for k8s options ''' + if task_vars is None: + task_vars = dict() + + result = super(ActionModule, self).run(tmp, task_vars) + del tmp # tmp no longer has any effect + + new_module_args = copy.deepcopy(self._task.args) + kubeconfig = self._task.args.get('kubeconfig', None) + # find the file in the expected search path + if kubeconfig: + try: + # find in expected paths + kubeconfig = self._find_needle('files', kubeconfig) + except AnsibleError as e: + result['failed'] = True + result['msg'] = to_text(e) + result['exception'] = traceback.format_exc() + return result + + if kubeconfig: + # decrypt kubeconfig found + actual_file = self._loader.get_real_file(kubeconfig, decrypt=True) + new_module_args['kubeconfig'] = actual_file + + # find the file in the expected search path + src = self._task.args.get('src', None) + if src: + try: + # find in expected paths + src = self._find_needle('files', src) + except AnsibleError as e: + result['failed'] = True + result['msg'] = to_text(e) + result['exception'] = traceback.format_exc() + return result + + if src: + new_module_args['src'] = src + + template = self._task.args.get('template', None) + if template: + # template is only supported by k8s module. 
+ if self._task.action not in ('k8s', 'community.kubernetes.k8s', 'community.okd.k8s'): + raise AnsibleActionFail("'template' is only supported parameter for 'k8s' module.") + if isinstance(template, string_types): + # treat this as raw_params + template_path = template + newline_sequence = self.DEFAULT_NEWLINE_SEQUENCE + variable_start_string = None + variable_end_string = None + block_start_string = None + block_end_string = None + trim_blocks = True + lstrip_blocks = False + elif isinstance(template, dict): + template_args = template + template_path = template_args.get('path', None) + if not template: + raise AnsibleActionFail("Please specify path for template.") + + # Options type validation strings + for s_type in ('newline_sequence', 'variable_start_string', 'variable_end_string', 'block_start_string', + 'block_end_string'): + if s_type in template_args: + value = ensure_type(template_args[s_type], 'string') + if value is not None and not isinstance(value, string_types): + raise AnsibleActionFail("%s is expected to be a string, but got %s instead" % (s_type, type(value))) + try: + trim_blocks = boolean(template_args.get('trim_blocks', True), strict=False) + lstrip_blocks = boolean(template_args.get('lstrip_blocks', False), strict=False) + except TypeError as e: + raise AnsibleActionFail(to_native(e)) + + newline_sequence = template_args.get('newline_sequence', self.DEFAULT_NEWLINE_SEQUENCE) + variable_start_string = template_args.get('variable_start_string', None) + variable_end_string = template_args.get('variable_end_string', None) + block_start_string = template_args.get('block_start_string', None) + block_end_string = template_args.get('block_end_string', None) + else: + raise AnsibleActionFail("Error while reading template file - " + "a string or dict for template expected, but got %s instead" % type(template)) + try: + source = self._find_needle('templates', template_path) + except AnsibleError as e: + raise AnsibleActionFail(to_text(e)) + + # Option 
`lstrip_blocks' was added in Jinja2 version 2.7. + if lstrip_blocks: + try: + import jinja2.defaults + except ImportError: + raise AnsibleError('Unable to import Jinja2 defaults for determining Jinja2 features.') + + try: + jinja2.defaults.LSTRIP_BLOCKS + except AttributeError: + raise AnsibleError("Option `lstrip_blocks' is only available in Jinja2 versions >=2.7") + + wrong_sequences = ["\\n", "\\r", "\\r\\n"] + allowed_sequences = ["\n", "\r", "\r\n"] + + # We need to convert unescaped sequences to proper escaped sequences for Jinja2 + if newline_sequence in wrong_sequences: + newline_sequence = allowed_sequences[wrong_sequences.index(newline_sequence)] + elif newline_sequence not in allowed_sequences: + raise AnsibleActionFail("newline_sequence needs to be one of: \n, \r or \r\n") + + # Get vault decrypted tmp file + try: + tmp_source = self._loader.get_real_file(source) + except AnsibleFileNotFound as e: + raise AnsibleActionFail("could not find template=%s, %s" % (source, to_text(e))) + b_tmp_source = to_bytes(tmp_source, errors='surrogate_or_strict') + + # template the source data locally & get ready to transfer + try: + with open(b_tmp_source, 'rb') as f: + try: + template_data = to_text(f.read(), errors='surrogate_or_strict') + except UnicodeError: + raise AnsibleActionFail("Template source files must be utf-8 encoded") + + # add ansible 'template' vars + temp_vars = task_vars.copy() + old_vars = self._templar.available_variables + + self._templar.environment.newline_sequence = newline_sequence + if block_start_string is not None: + self._templar.environment.block_start_string = block_start_string + if block_end_string is not None: + self._templar.environment.block_end_string = block_end_string + if variable_start_string is not None: + self._templar.environment.variable_start_string = variable_start_string + if variable_end_string is not None: + self._templar.environment.variable_end_string = variable_end_string + self._templar.environment.trim_blocks = 
trim_blocks + self._templar.environment.lstrip_blocks = lstrip_blocks + self._templar.available_variables = temp_vars + resultant = self._templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False) + self._templar.available_variables = old_vars + resource_definition = self._task.args.get('definition', None) + if not resource_definition: + new_module_args.pop('template') + new_module_args['definition'] = resultant + except AnsibleAction: + raise + except Exception as e: + raise AnsibleActionFail("%s: %s" % (type(e).__name__, to_text(e))) + finally: + self._loader.cleanup_tmp_file(b_tmp_source) + + # Execute the k8s_* module. + module_return = self._execute_module(module_name=self._task.action, module_args=new_module_args, task_vars=task_vars) + + # Delete tmp path + self._remove_tmp_path(self._connection._shell.tmpdir) + + result.update(module_return) + + return self._ensure_invocation(result) diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_service.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_service.py new file mode 100644 index 00000000..6b26225f --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/action/k8s_service.py @@ -0,0 +1,198 @@ +# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> +# Copyright (c) 2017, Toshio Kuratomi <tkuraotmi@ansible.com> +# Copyright (c) 2020, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import copy +import traceback + +from ansible.config.manager import ensure_type +from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleAction, AnsibleActionFail +from ansible.module_utils.parsing.convert_bool import boolean +from ansible.module_utils.six import string_types +from 
ansible.module_utils._text import to_text, to_bytes, to_native +from ansible.plugins.action import ActionBase + + +class ActionModule(ActionBase): + + TRANSFERS_FILES = True + DEFAULT_NEWLINE_SEQUENCE = "\n" + + def _ensure_invocation(self, result): + # NOTE: adding invocation arguments here needs to be kept in sync with + # any no_log specified in the argument_spec in the module. + if 'invocation' not in result: + if self._play_context.no_log: + result['invocation'] = "CENSORED: no_log is set" + else: + result['invocation'] = self._task.args.copy() + result['invocation']['module_args'] = self._task.args.copy() + + return result + + def run(self, tmp=None, task_vars=None): + ''' handler for k8s options ''' + if task_vars is None: + task_vars = dict() + + result = super(ActionModule, self).run(tmp, task_vars) + del tmp # tmp no longer has any effect + + new_module_args = copy.deepcopy(self._task.args) + kubeconfig = self._task.args.get('kubeconfig', None) + # find the file in the expected search path + if kubeconfig: + try: + # find in expected paths + kubeconfig = self._find_needle('files', kubeconfig) + except AnsibleError as e: + result['failed'] = True + result['msg'] = to_text(e) + result['exception'] = traceback.format_exc() + return result + + if kubeconfig: + # decrypt kubeconfig found + actual_file = self._loader.get_real_file(kubeconfig, decrypt=True) + new_module_args['kubeconfig'] = actual_file + + # find the file in the expected search path + src = self._task.args.get('src', None) + if src: + try: + # find in expected paths + src = self._find_needle('files', src) + except AnsibleError as e: + result['failed'] = True + result['msg'] = to_text(e) + result['exception'] = traceback.format_exc() + return result + + if src: + new_module_args['src'] = src + + template = self._task.args.get('template', None) + if template: + # template is only supported by k8s module. 
+ if self._task.action not in ('k8s', 'community.kubernetes.k8s', 'community.okd.k8s'): + raise AnsibleActionFail("'template' is only supported parameter for 'k8s' module.") + if isinstance(template, string_types): + # treat this as raw_params + template_path = template + newline_sequence = self.DEFAULT_NEWLINE_SEQUENCE + variable_start_string = None + variable_end_string = None + block_start_string = None + block_end_string = None + trim_blocks = True + lstrip_blocks = False + elif isinstance(template, dict): + template_args = template + template_path = template_args.get('path', None) + if not template: + raise AnsibleActionFail("Please specify path for template.") + + # Options type validation strings + for s_type in ('newline_sequence', 'variable_start_string', 'variable_end_string', 'block_start_string', + 'block_end_string'): + if s_type in template_args: + value = ensure_type(template_args[s_type], 'string') + if value is not None and not isinstance(value, string_types): + raise AnsibleActionFail("%s is expected to be a string, but got %s instead" % (s_type, type(value))) + try: + trim_blocks = boolean(template_args.get('trim_blocks', True), strict=False) + lstrip_blocks = boolean(template_args.get('lstrip_blocks', False), strict=False) + except TypeError as e: + raise AnsibleActionFail(to_native(e)) + + newline_sequence = template_args.get('newline_sequence', self.DEFAULT_NEWLINE_SEQUENCE) + variable_start_string = template_args.get('variable_start_string', None) + variable_end_string = template_args.get('variable_end_string', None) + block_start_string = template_args.get('block_start_string', None) + block_end_string = template_args.get('block_end_string', None) + else: + raise AnsibleActionFail("Error while reading template file - " + "a string or dict for template expected, but got %s instead" % type(template)) + try: + source = self._find_needle('templates', template_path) + except AnsibleError as e: + raise AnsibleActionFail(to_text(e)) + + # Option 
`lstrip_blocks' was added in Jinja2 version 2.7. + if lstrip_blocks: + try: + import jinja2.defaults + except ImportError: + raise AnsibleError('Unable to import Jinja2 defaults for determining Jinja2 features.') + + try: + jinja2.defaults.LSTRIP_BLOCKS + except AttributeError: + raise AnsibleError("Option `lstrip_blocks' is only available in Jinja2 versions >=2.7") + + wrong_sequences = ["\\n", "\\r", "\\r\\n"] + allowed_sequences = ["\n", "\r", "\r\n"] + + # We need to convert unescaped sequences to proper escaped sequences for Jinja2 + if newline_sequence in wrong_sequences: + newline_sequence = allowed_sequences[wrong_sequences.index(newline_sequence)] + elif newline_sequence not in allowed_sequences: + raise AnsibleActionFail("newline_sequence needs to be one of: \n, \r or \r\n") + + # Get vault decrypted tmp file + try: + tmp_source = self._loader.get_real_file(source) + except AnsibleFileNotFound as e: + raise AnsibleActionFail("could not find template=%s, %s" % (source, to_text(e))) + b_tmp_source = to_bytes(tmp_source, errors='surrogate_or_strict') + + # template the source data locally & get ready to transfer + try: + with open(b_tmp_source, 'rb') as f: + try: + template_data = to_text(f.read(), errors='surrogate_or_strict') + except UnicodeError: + raise AnsibleActionFail("Template source files must be utf-8 encoded") + + # add ansible 'template' vars + temp_vars = task_vars.copy() + old_vars = self._templar.available_variables + + self._templar.environment.newline_sequence = newline_sequence + if block_start_string is not None: + self._templar.environment.block_start_string = block_start_string + if block_end_string is not None: + self._templar.environment.block_end_string = block_end_string + if variable_start_string is not None: + self._templar.environment.variable_start_string = variable_start_string + if variable_end_string is not None: + self._templar.environment.variable_end_string = variable_end_string + self._templar.environment.trim_blocks = 
trim_blocks + self._templar.environment.lstrip_blocks = lstrip_blocks + self._templar.available_variables = temp_vars + resultant = self._templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False) + self._templar.available_variables = old_vars + resource_definition = self._task.args.get('definition', None) + if not resource_definition: + new_module_args.pop('template') + new_module_args['definition'] = resultant + except AnsibleAction: + raise + except Exception as e: + raise AnsibleActionFail("%s: %s" % (type(e).__name__, to_text(e))) + finally: + self._loader.cleanup_tmp_file(b_tmp_source) + + # Execute the k8s_* module. + module_return = self._execute_module(module_name=self._task.action, module_args=new_module_args, task_vars=task_vars) + + # Delete tmp path + self._remove_tmp_path(self._connection._shell.tmpdir) + + result.update(module_return) + + return self._ensure_invocation(result) diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/connection/kubectl.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/connection/kubectl.py new file mode 100644 index 00000000..5c16fe9e --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/connection/kubectl.py @@ -0,0 +1,369 @@ +# Based on the docker connection plugin +# +# Connection plugin for configuring kubernetes containers with kubectl +# (c) 2017, XuXinkun <xuxinkun@gmail.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r""" + author: + - xuxinkun + + connection: kubectl + + short_description: Execute tasks in pods running on Kubernetes. + + description: + - Use the kubectl exec command to run tasks in, or put/fetch files to, pods running on the Kubernetes + container platform. + + requirements: + - kubectl (go binary) + + options: + kubectl_pod: + description: + - Pod name. + - Required when the host name does not match pod name. + default: '' + vars: + - name: ansible_kubectl_pod + env: + - name: K8S_AUTH_POD + kubectl_container: + description: + - Container name. + - Required when a pod contains more than one container. + default: '' + vars: + - name: ansible_kubectl_container + env: + - name: K8S_AUTH_CONTAINER + kubectl_namespace: + description: + - The namespace of the pod + default: '' + vars: + - name: ansible_kubectl_namespace + env: + - name: K8S_AUTH_NAMESPACE + kubectl_extra_args: + description: + - Extra arguments to pass to the kubectl command line. + - Please be aware that this passes information directly on the command line and it could expose sensitive data. + default: '' + vars: + - name: ansible_kubectl_extra_args + env: + - name: K8S_AUTH_EXTRA_ARGS + kubectl_kubeconfig: + description: + - Path to a kubectl config file. Defaults to I(~/.kube/config) + default: '' + vars: + - name: ansible_kubectl_kubeconfig + - name: ansible_kubectl_config + env: + - name: K8S_AUTH_KUBECONFIG + kubectl_context: + description: + - The name of a context found in the K8s config file. + default: '' + vars: + - name: ansible_kubectl_context + env: + - name: K8S_AUTH_CONTEXT + kubectl_host: + description: + - URL for accessing the API. 
+ default: '' + vars: + - name: ansible_kubectl_host + - name: ansible_kubectl_server + env: + - name: K8S_AUTH_HOST + - name: K8S_AUTH_SERVER + kubectl_username: + description: + - Provide a username for authenticating with the API. + default: '' + vars: + - name: ansible_kubectl_username + - name: ansible_kubectl_user + env: + - name: K8S_AUTH_USERNAME + kubectl_password: + description: + - Provide a password for authenticating with the API. + - Please be aware that this passes information directly on the command line and it could expose sensitive data. + We recommend using the file based authentication options instead. + default: '' + vars: + - name: ansible_kubectl_password + env: + - name: K8S_AUTH_PASSWORD + kubectl_token: + description: + - API authentication bearer token. + - Please be aware that this passes information directly on the command line and it could expose sensitive data. + We recommend using the file based authentication options instead. + vars: + - name: ansible_kubectl_token + - name: ansible_kubectl_api_key + env: + - name: K8S_AUTH_TOKEN + - name: K8S_AUTH_API_KEY + client_cert: + description: + - Path to a certificate used to authenticate with the API. + default: '' + vars: + - name: ansible_kubectl_cert_file + - name: ansible_kubectl_client_cert + env: + - name: K8S_AUTH_CERT_FILE + aliases: [ kubectl_cert_file ] + client_key: + description: + - Path to a key file used to authenticate with the API. + default: '' + vars: + - name: ansible_kubectl_key_file + - name: ansible_kubectl_client_key + env: + - name: K8S_AUTH_KEY_FILE + aliases: [ kubectl_key_file ] + ca_cert: + description: + - Path to a CA certificate used to authenticate with the API. + default: '' + vars: + - name: ansible_kubectl_ssl_ca_cert + - name: ansible_kubectl_ca_cert + env: + - name: K8S_AUTH_SSL_CA_CERT + aliases: [ kubectl_ssl_ca_cert ] + validate_certs: + description: + - Whether or not to verify the API server's SSL certificate. Defaults to I(true). 
+ default: '' + vars: + - name: ansible_kubectl_verify_ssl + - name: ansible_kubectl_validate_certs + env: + - name: K8S_AUTH_VERIFY_SSL + aliases: [ kubectl_verify_ssl ] +""" + +import distutils.spawn +import os +import os.path +import subprocess + +from ansible.parsing.yaml.loader import AnsibleLoader +from ansible.errors import AnsibleError, AnsibleFileNotFound +from ansible.module_utils.six.moves import shlex_quote +from ansible.module_utils._text import to_bytes +from ansible.plugins.connection import ConnectionBase, BUFSIZE +from ansible.utils.display import Display + +display = Display() + + +CONNECTION_TRANSPORT = 'kubectl' + +CONNECTION_OPTIONS = { + 'kubectl_container': '-c', + 'kubectl_namespace': '-n', + 'kubectl_kubeconfig': '--kubeconfig', + 'kubectl_context': '--context', + 'kubectl_host': '--server', + 'kubectl_username': '--username', + 'kubectl_password': '--password', + 'client_cert': '--client-certificate', + 'client_key': '--client-key', + 'ca_cert': '--certificate-authority', + 'validate_certs': '--insecure-skip-tls-verify', + 'kubectl_token': '--token' +} + + +class Connection(ConnectionBase): + ''' Local kubectl based connections ''' + + transport = CONNECTION_TRANSPORT + connection_options = CONNECTION_OPTIONS + documentation = DOCUMENTATION + has_pipelining = True + transport_cmd = None + + def __init__(self, play_context, new_stdin, *args, **kwargs): + super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) + + # Note: kubectl runs commands as the user that started the container. + # It is impossible to set the remote user for a kubectl connection. 
+ cmd_arg = '{0}_command'.format(self.transport) + if cmd_arg in kwargs: + self.transport_cmd = kwargs[cmd_arg] + else: + self.transport_cmd = distutils.spawn.find_executable(self.transport) + if not self.transport_cmd: + raise AnsibleError("{0} command not found in PATH".format(self.transport)) + + def _build_exec_cmd(self, cmd): + """ Build the local kubectl exec command to run cmd on remote_host + """ + local_cmd = [self.transport_cmd] + censored_local_cmd = [self.transport_cmd] + + # Build command options based on doc string + doc_yaml = AnsibleLoader(self.documentation).get_single_data() + for key in doc_yaml.get('options'): + if key.endswith('verify_ssl') and self.get_option(key) != '': + # Translate verify_ssl to skip_verify_ssl, and output as string + skip_verify_ssl = not self.get_option(key) + local_cmd.append(u'{0}={1}'.format(self.connection_options[key], str(skip_verify_ssl).lower())) + censored_local_cmd.append(u'{0}={1}'.format(self.connection_options[key], str(skip_verify_ssl).lower())) + elif not key.endswith('container') and self.get_option(key) and self.connection_options.get(key): + cmd_arg = self.connection_options[key] + local_cmd += [cmd_arg, self.get_option(key)] + # Redact password and token from console log + if key.endswith(('_token', '_password')): + censored_local_cmd += [cmd_arg, '********'] + + extra_args_name = u'{0}_extra_args'.format(self.transport) + if self.get_option(extra_args_name): + local_cmd += self.get_option(extra_args_name).split(' ') + censored_local_cmd += self.get_option(extra_args_name).split(' ') + + pod = self.get_option(u'{0}_pod'.format(self.transport)) + if not pod: + pod = self._play_context.remote_addr + # -i is needed to keep stdin open which allows pipelining to work + local_cmd += ['exec', '-i', pod] + censored_local_cmd += ['exec', '-i', pod] + + # if the pod has more than one container, then container is required + container_arg_name = u'{0}_container'.format(self.transport) + if 
self.get_option(container_arg_name): + local_cmd += ['-c', self.get_option(container_arg_name)] + censored_local_cmd += ['-c', self.get_option(container_arg_name)] + + local_cmd += ['--'] + cmd + censored_local_cmd += ['--'] + cmd + + return local_cmd, censored_local_cmd + + def _connect(self, port=None): + """ Connect to the container. Nothing to do """ + super(Connection, self)._connect() + if not self._connected: + display.vvv(u"ESTABLISH {0} CONNECTION".format(self.transport), host=self._play_context.remote_addr) + self._connected = True + + def exec_command(self, cmd, in_data=None, sudoable=False): + """ Run a command in the container """ + super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) + + local_cmd, censored_local_cmd = self._build_exec_cmd([self._play_context.executable, '-c', cmd]) + + display.vvv("EXEC %s" % (censored_local_cmd,), host=self._play_context.remote_addr) + local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] + p = subprocess.Popen(local_cmd, shell=False, stdin=subprocess.PIPE, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + + stdout, stderr = p.communicate(in_data) + return (p.returncode, stdout, stderr) + + def _prefix_login_path(self, remote_path): + ''' Make sure that we put files into a standard path + + If a path is relative, then we need to choose where to put it. + ssh chooses $HOME but we aren't guaranteed that a home dir will + exist in any given chroot. So for now we're choosing "/" instead. + This also happens to be the former default. 
+ + Can revisit using $HOME instead if it's a problem + ''' + if not remote_path.startswith(os.path.sep): + remote_path = os.path.join(os.path.sep, remote_path) + return os.path.normpath(remote_path) + + def put_file(self, in_path, out_path): + """ Transfer a file from local to the container """ + super(Connection, self).put_file(in_path, out_path) + display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr) + + out_path = self._prefix_login_path(out_path) + if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')): + raise AnsibleFileNotFound( + "file or module does not exist: %s" % in_path) + + out_path = shlex_quote(out_path) + # kubectl doesn't have native support for copying files into + # running containers, so we use kubectl exec to implement this + with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file: + if not os.fstat(in_file.fileno()).st_size: + count = ' count=0' + else: + count = '' + args, dummy = self._build_exec_cmd([self._play_context.executable, "-c", "dd of=%s bs=%s%s" % (out_path, BUFSIZE, count)]) + args = [to_bytes(i, errors='surrogate_or_strict') for i in args] + try: + p = subprocess.Popen(args, stdin=in_file, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + except OSError: + raise AnsibleError("kubectl connection requires dd command in the container to put files") + stdout, stderr = p.communicate() + + if p.returncode != 0: + raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) + + def fetch_file(self, in_path, out_path): + """ Fetch a file from container to local. 
""" + super(Connection, self).fetch_file(in_path, out_path) + display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr) + + in_path = self._prefix_login_path(in_path) + out_dir = os.path.dirname(out_path) + + # kubectl doesn't have native support for fetching files from + # running containers, so we use kubectl exec to implement this + args, dummy = self._build_exec_cmd([self._play_context.executable, "-c", "dd if=%s bs=%s" % (in_path, BUFSIZE)]) + args = [to_bytes(i, errors='surrogate_or_strict') for i in args] + actual_out_path = os.path.join(out_dir, os.path.basename(in_path)) + with open(to_bytes(actual_out_path, errors='surrogate_or_strict'), 'wb') as out_file: + try: + p = subprocess.Popen(args, stdin=subprocess.PIPE, + stdout=out_file, stderr=subprocess.PIPE) + except OSError: + raise AnsibleError( + "{0} connection requires dd command in the container to fetch files".format(self.transport) + ) + stdout, stderr = p.communicate() + + if p.returncode != 0: + raise AnsibleError("failed to fetch file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) + + if actual_out_path != out_path: + os.rename(to_bytes(actual_out_path, errors='strict'), to_bytes(out_path, errors='strict')) + + def close(self): + """ Terminate the connection. 
Nothing to do for kubectl""" + super(Connection, self).close() + self._connected = False diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/__init__.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/__init__.py diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/helm_common_options.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/helm_common_options.py new file mode 100644 index 00000000..f13bc1e3 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/helm_common_options.py @@ -0,0 +1,34 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2020, Ansible Project +# Copyright: (c) 2020, Red Hat Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Options for common Helm modules + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + + DOCUMENTATION = r''' +options: + binary_path: + description: + - The path of a helm binary to use. + required: false + type: path + context: + description: + - Helm option to specify which kubeconfig context to use. + - If the value is not specified in the task, the value of environment variable C(K8S_AUTH_CONTEXT) will be used instead. + type: str + aliases: [ kube_context ] + kubeconfig: + description: + - Helm option to specify kubeconfig path to use. + - If the value is not specified in the task, the value of environment variable C(K8S_AUTH_KUBECONFIG) will be used instead. 
+ type: path + aliases: [ kubeconfig_path ] +''' diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_auth_options.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_auth_options.py new file mode 100644 index 00000000..053caed2 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_auth_options.py @@ -0,0 +1,97 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2018, Red Hat | Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Options for authenticating with the API. + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + + DOCUMENTATION = r''' +options: + host: + description: + - Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable. + type: str + api_key: + description: + - Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment variable. + type: str + kubeconfig: + description: + - Path to an existing Kubernetes config file. If not provided, and no other connection + options are provided, the openshift client will attempt to load the default + configuration file from I(~/.kube/config.json). Can also be specified via K8S_AUTH_KUBECONFIG environment + variable. + type: path + context: + description: + - The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment variable. + type: str + username: + description: + - Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME environment + variable. + - Please note that this only works with clusters configured to use HTTP Basic Auth. If your cluster has a + different form of authentication (e.g. 
OAuth2 in OpenShift), this option will not work as expected and you + should look into the C(k8s_auth) module, as that might do what you need. + type: str + password: + description: + - Provide a password for authenticating with the API. Can also be specified via K8S_AUTH_PASSWORD environment + variable. + - Please read the description of the C(username) option for a discussion of when this option is applicable. + type: str + client_cert: + description: + - Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE environment + variable. + type: path + aliases: [ cert_file ] + client_key: + description: + - Path to a key file used to authenticate with the API. Can also be specified via K8S_AUTH_KEY_FILE environment + variable. + type: path + aliases: [ key_file ] + ca_cert: + description: + - Path to a CA certificate used to authenticate with the API. The full certificate chain must be provided to + avoid certificate validation errors. Can also be specified via K8S_AUTH_SSL_CA_CERT environment variable. + type: path + aliases: [ ssl_ca_cert ] + validate_certs: + description: + - Whether or not to verify the API server's SSL certificates. Can also be specified via K8S_AUTH_VERIFY_SSL + environment variable. + type: bool + aliases: [ verify_ssl ] + proxy: + description: + - The URL of an HTTP proxy to use for the connection. Can also be specified via K8S_AUTH_PROXY environment variable. + - Please note that this module does not pick up typical proxy settings from the environment (e.g. HTTP_PROXY). + type: str + persist_config: + description: + - Whether or not to save the kube config refresh tokens. + Can also be specified via K8S_AUTH_PERSIST_CONFIG environment variable. + - When the k8s context is using a user credentials with refresh tokens (like oidc or gke/gcloud auth), + the token is refreshed by the k8s python client library but not saved by default. So the old refresh token can + expire and the next auth might fail. 
Setting this flag to true will tell the k8s python client to save the + new refresh token to the kube config file. + - Default to false. + - Please note that the current version of the k8s python client library does not support setting this flag to True yet. + - "The fix for this k8s python library is here: https://github.com/kubernetes-client/python-base/pull/169" + type: bool +notes: + - "The OpenShift Python client wraps the K8s Python client, providing full access to + all of the APIS and models available on both platforms. For API version details and + additional information visit https://github.com/openshift/openshift-restclient-python" + - "To avoid SSL certificate validation errors when C(validate_certs) is I(True), the full + certificate chain for the API server must be provided via C(ca_cert) or in the + kubeconfig file." +''' diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_name_options.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_name_options.py new file mode 100644 index 00000000..fe4e5c47 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_name_options.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2018, Red Hat | Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Options for selecting or identifying a specific K8s object + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + + DOCUMENTATION = r''' +options: + api_version: + description: + - Use to specify the API version. + - Use to create, delete, or discover an object without providing a full resource definition. + - Use in conjunction with I(kind), I(name), and I(namespace) to identify a specific object. 
+ - If I(resource definition) is provided, the I(apiVersion) value from the I(resource_definition) + will override this option. + type: str + default: v1 + aliases: + - api + - version + kind: + description: + - Use to specify an object model. + - Use to create, delete, or discover an object without providing a full resource definition. + - Use in conjunction with I(api_version), I(name), and I(namespace) to identify a specific object. + - If I(resource definition) is provided, the I(kind) value from the I(resource_definition) + will override this option. + type: str + name: + description: + - Use to specify an object name. + - Use to create, delete, or discover an object without providing a full resource definition. + - Use in conjunction with I(api_version), I(kind) and I(namespace) to identify a specific object. + - If I(resource definition) is provided, the I(metadata.name) value from the I(resource_definition) + will override this option. + type: str + namespace: + description: + - Use to specify an object namespace. + - Useful when creating, deleting, or discovering an object without providing a full resource definition. + - Use in conjunction with I(api_version), I(kind), and I(name) to identify a specific object. + - If I(resource definition) is provided, the I(metadata.namespace) value from the I(resource_definition) + will override this option. 
+ type: str +''' diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_resource_options.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_resource_options.py new file mode 100644 index 00000000..b9dcfe16 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_resource_options.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2018, Red Hat | Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Options for providing an object configuration + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + + DOCUMENTATION = r''' +options: + resource_definition: + description: + - Provide a valid YAML definition (either as a string, list, or dict) for an object when creating or updating. + - "NOTE: I(kind), I(api_version), I(name), and I(namespace) will be overwritten by corresponding values found in the provided I(resource_definition)." + aliases: + - definition + - inline + src: + description: + - "Provide a path to a file containing a valid YAML definition of an object or objects to be created or updated. Mutually + exclusive with I(resource_definition). NOTE: I(kind), I(api_version), I(name), and I(namespace) will be + overwritten by corresponding values found in the configuration read in from the I(src) file." + - Reads from the local file system. To read from the Ansible controller's file system, including vaulted files, use the file lookup + plugin or template lookup plugin, combined with the from_yaml filter, and pass the result to + I(resource_definition). See Examples below. + - Mutually exclusive with I(template) in case of M(k8s) module. 
+ type: path +''' diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_scale_options.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_scale_options.py new file mode 100644 index 00000000..0c01439a --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_scale_options.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2018, Red Hat | Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Options used by scale modules. + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + + DOCUMENTATION = r''' +options: + replicas: + description: + - The desired number of replicas. + type: int + required: True + current_replicas: + description: + - For Deployment, ReplicaSet, Replication Controller, only scale, if the number of existing replicas + matches. In the case of a Job, update parallelism only if the current parallelism value matches. + type: int + resource_version: + description: + - Only attempt to scale, if the current object version matches. + type: str + wait: + description: + - For Deployment, ReplicaSet, Replication Controller, wait for the status value of I(ready_replicas) to change + to the number of I(replicas). In the case of a Job, this option is ignored. + type: bool + default: yes + wait_timeout: + description: + - When C(wait) is I(True), the number of seconds to wait for the I(ready_replicas) status to equal I(replicas). + If the status is not reached within the allotted time, an error will result. In the case of a Job, this option + is ignored. 
+ type: int + default: 20 +''' diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_state_options.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_state_options.py new file mode 100644 index 00000000..8f741ba8 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_state_options.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2018, Red Hat | Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Options for specifying object state + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + + DOCUMENTATION = r''' +options: + state: + description: + - Determines if an object should be created, patched, or deleted. When set to C(present), an object will be + created, if it does not already exist. If set to C(absent), an existing object will be deleted. If set to + C(present), an existing object will be patched, if its attributes differ from those specified using + I(resource_definition) or I(src). + type: str + default: present + choices: [ absent, present ] + force: + description: + - If set to C(yes), and I(state) is C(present), an existing object will be replaced. 
+ type: bool + default: no +''' diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_wait_options.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_wait_options.py new file mode 100644 index 00000000..867901bb --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/doc_fragments/k8s_wait_options.py @@ -0,0 +1,67 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2020, Red Hat | Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Options for specifying object wait + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + + DOCUMENTATION = r''' +options: + wait: + description: + - Whether to wait for certain resource kinds to end up in the desired state. + - By default the module exits once Kubernetes has received the request. + - Implemented for C(state=present) for C(Deployment), C(DaemonSet) and C(Pod), and for C(state=absent) for all resource kinds. + - For resource kinds without an implementation, C(wait) returns immediately unless C(wait_condition) is set. + default: no + type: bool + wait_sleep: + description: + - Number of seconds to sleep between checks. + default: 5 + type: int + wait_timeout: + description: + - How long in seconds to wait for the resource to end up in the desired state. + - Ignored if C(wait) is not set. + default: 120 + type: int + wait_condition: + description: + - Specifies a custom condition on the status to wait for. + - Ignored if C(wait) is not set or is set to False. + suboptions: + type: + type: str + description: + - The type of condition to wait for. + - For example, the C(Pod) resource will set the C(Ready) condition (among others). + - Required if you are specifying a C(wait_condition). + - If left empty, the C(wait_condition) field will be ignored. 
+ - The possible types for a condition are specific to each resource type in Kubernetes. + - See the API documentation of the status field for a given resource to see possible choices. + status: + type: str + description: + - The value of the status field in your desired condition. + - For example, if a C(Deployment) is paused, the C(Progressing) C(type) will have the C(Unknown) status. + choices: + - True + - False + - Unknown + default: "True" + reason: + type: str + description: + - The value of the reason field in your desired condition + - For example, if a C(Deployment) is paused, The C(Progressing) C(type) will have the C(DeploymentPaused) reason. + - The possible reasons in a condition are specific to each resource type in Kubernetes. + - See the API documentation of the status field for a given resource to see possible choices. + type: dict +''' diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/filter/k8s.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/filter/k8s.py new file mode 100644 index 00000000..3597b852 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/filter/k8s.py @@ -0,0 +1,33 @@ +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +try: + from openshift.helper.hashes import generate_hash + HAS_GENERATE_HASH = True +except ImportError: + HAS_GENERATE_HASH = False + +from ansible.errors import AnsibleFilterError + + +def k8s_config_resource_name(resource): + if not HAS_GENERATE_HASH: + raise AnsibleFilterError("k8s_config_resource_name requires openshift>=0.7.2") + try: + return resource['metadata']['name'] + '-' + generate_hash(resource) + except KeyError: + raise AnsibleFilterError("resource must have a metadata.name key to generate a resource name") + + +# 
---- Ansible filters ---- +class FilterModule(object): + + def filters(self): + return { + 'k8s_config_resource_name': k8s_config_resource_name + } diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/inventory/k8s.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/inventory/k8s.py new file mode 100644 index 00000000..ede54375 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/inventory/k8s.py @@ -0,0 +1,363 @@ +# Copyright (c) 2018 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + name: k8s + plugin_type: inventory + author: + - Chris Houseknecht <@chouseknecht> + - Fabian von Feilitzsch <@fabianvf> + + short_description: Kubernetes (K8s) inventory source + + description: + - Fetch containers and services for one or more clusters. + - Groups by cluster name, namespace, namespace_services, namespace_pods, and labels. + - Uses the kubectl connection plugin to access the Kubernetes cluster. + - Uses k8s.(yml|yaml) YAML configuration file to set parameter values. + + options: + plugin: + description: token that ensures this is a source file for the 'k8s' plugin. + required: True + choices: ['k8s'] + connections: + description: + - Optional list of cluster connection settings. If no connections are provided, the default + I(~/.kube/config) and active context will be used, and objects will be returned for all namespaces + the active user is authorized to access. + suboptions: + name: + description: + - Optional name to assign to the cluster. If not provided, a name is constructed from the server + and port. + kubeconfig: + description: + - Path to an existing Kubernetes config file. 
If not provided, and no other connection + options are provided, the OpenShift client will attempt to load the default + configuration file from I(~/.kube/config.json). Can also be specified via K8S_AUTH_KUBECONFIG + environment variable. + context: + description: + - The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment + variable. + host: + description: + - Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable. + api_key: + description: + - Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment + variable. + username: + description: + - Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME + environment variable. + password: + description: + - Provide a password for authenticating with the API. Can also be specified via K8S_AUTH_PASSWORD + environment variable. + client_cert: + description: + - Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE + environment variable. + aliases: [ cert_file ] + client_key: + description: + - Path to a key file used to authenticate with the API. Can also be specified via K8S_AUTH_KEY_FILE + environment variable. + aliases: [ key_file ] + ca_cert: + description: + - Path to a CA certificate used to authenticate with the API. Can also be specified via + K8S_AUTH_SSL_CA_CERT environment variable. + aliases: [ ssl_ca_cert ] + validate_certs: + description: + - "Whether or not to verify the API server's SSL certificates. Can also be specified via + K8S_AUTH_VERIFY_SSL environment variable." + type: bool + aliases: [ verify_ssl ] + namespaces: + description: + - List of namespaces. If not specified, will fetch all containers for all namespaces user is authorized + to access. 
+ + requirements: + - "python >= 2.7" + - "openshift >= 0.6" + - "PyYAML >= 3.11" +''' + +EXAMPLES = ''' +# File must be named k8s.yaml or k8s.yml + +# Authenticate with token, and return all pods and services for all namespaces +plugin: community.kubernetes.k8s +connections: + - host: https://192.168.64.4:8443 + api_key: xxxxxxxxxxxxxxxx + validate_certs: false + +# Use default config (~/.kube/config) file and active context, and return objects for a specific namespace +plugin: community.kubernetes.k8s +connections: + - namespaces: + - testing + +# Use a custom config file, and a specific context. +plugin: community.kubernetes.k8s +connections: + - kubeconfig: /path/to/config + context: 'awx/192-168-64-4:8443/developer' +''' + +import json + +from ansible.errors import AnsibleError +from ansible_collections.community.kubernetes.plugins.module_utils.common import K8sAnsibleMixin, HAS_K8S_MODULE_HELPER, k8s_import_exception +from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable + +try: + from openshift.dynamic.exceptions import DynamicApiError +except ImportError: + pass + + +def format_dynamic_api_exc(exc): + if exc.body: + if exc.headers and exc.headers.get('Content-Type') == 'application/json': + message = json.loads(exc.body).get('message') + if message: + return message + return exc.body + else: + return '%s Reason: %s' % (exc.status, exc.reason) + + +class K8sInventoryException(Exception): + pass + + +class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable, K8sAnsibleMixin): + NAME = 'community.kubernetes.k8s' + + connection_plugin = 'community.kubernetes.kubectl' + transport = 'kubectl' + + def parse(self, inventory, loader, path, cache=True): + super(InventoryModule, self).parse(inventory, loader, path) + cache_key = self._get_cache_prefix(path) + config_data = self._read_config_data(path) + self.setup(config_data, cache, cache_key) + + def setup(self, config_data, cache, cache_key): + connections = 
config_data.get('connections') + + if not HAS_K8S_MODULE_HELPER: + raise K8sInventoryException( + "This module requires the OpenShift Python client. Try `pip install openshift`. Detail: {0}".format(k8s_import_exception) + ) + + source_data = None + if cache and cache_key in self._cache: + try: + source_data = self._cache[cache_key] + except KeyError: + pass + + if not source_data: + self.fetch_objects(connections) + + def fetch_objects(self, connections): + + if connections: + if not isinstance(connections, list): + raise K8sInventoryException("Expecting connections to be a list.") + + for connection in connections: + if not isinstance(connection, dict): + raise K8sInventoryException("Expecting connection to be a dictionary.") + client = self.get_api_client(**connection) + name = connection.get('name', self.get_default_host_name(client.configuration.host)) + if connection.get('namespaces'): + namespaces = connection['namespaces'] + else: + namespaces = self.get_available_namespaces(client) + for namespace in namespaces: + self.get_pods_for_namespace(client, name, namespace) + self.get_services_for_namespace(client, name, namespace) + else: + client = self.get_api_client() + name = self.get_default_host_name(client.configuration.host) + namespaces = self.get_available_namespaces(client) + for namespace in namespaces: + self.get_pods_for_namespace(client, name, namespace) + self.get_services_for_namespace(client, name, namespace) + + @staticmethod + def get_default_host_name(host): + return host.replace('https://', '').replace('http://', '').replace('.', '-').replace(':', '_') + + def get_available_namespaces(self, client): + v1_namespace = client.resources.get(api_version='v1', kind='Namespace') + try: + obj = v1_namespace.get() + except DynamicApiError as exc: + self.display.debug(exc) + raise K8sInventoryException('Error fetching Namespace list: %s' % format_dynamic_api_exc(exc)) + return [namespace.metadata.name for namespace in obj.items] + + def 
get_pods_for_namespace(self, client, name, namespace): + v1_pod = client.resources.get(api_version='v1', kind='Pod') + try: + obj = v1_pod.get(namespace=namespace) + except DynamicApiError as exc: + self.display.debug(exc) + raise K8sInventoryException('Error fetching Pod list: %s' % format_dynamic_api_exc(exc)) + + namespace_group = 'namespace_{0}'.format(namespace) + namespace_pods_group = '{0}_pods'.format(namespace_group) + + self.inventory.add_group(name) + self.inventory.add_group(namespace_group) + self.inventory.add_child(name, namespace_group) + self.inventory.add_group(namespace_pods_group) + self.inventory.add_child(namespace_group, namespace_pods_group) + + for pod in obj.items: + pod_name = pod.metadata.name + pod_groups = [] + pod_annotations = {} if not pod.metadata.annotations else dict(pod.metadata.annotations) + + if pod.metadata.labels: + # create a group for each label_value + for key, value in pod.metadata.labels: + group_name = 'label_{0}_{1}'.format(key, value) + if group_name not in pod_groups: + pod_groups.append(group_name) + self.inventory.add_group(group_name) + pod_labels = dict(pod.metadata.labels) + else: + pod_labels = {} + + if not pod.status.containerStatuses: + continue + + for container in pod.status.containerStatuses: + # add each pod_container to the namespace group, and to each label_value group + container_name = '{0}_{1}'.format(pod.metadata.name, container.name) + self.inventory.add_host(container_name) + self.inventory.add_child(namespace_pods_group, container_name) + if pod_groups: + for group in pod_groups: + self.inventory.add_child(group, container_name) + + # Add hostvars + self.inventory.set_variable(container_name, 'object_type', 'pod') + self.inventory.set_variable(container_name, 'labels', pod_labels) + self.inventory.set_variable(container_name, 'annotations', pod_annotations) + self.inventory.set_variable(container_name, 'cluster_name', pod.metadata.clusterName) + self.inventory.set_variable(container_name, 
'pod_node_name', pod.spec.nodeName) + self.inventory.set_variable(container_name, 'pod_name', pod.spec.name) + self.inventory.set_variable(container_name, 'pod_host_ip', pod.status.hostIP) + self.inventory.set_variable(container_name, 'pod_phase', pod.status.phase) + self.inventory.set_variable(container_name, 'pod_ip', pod.status.podIP) + self.inventory.set_variable(container_name, 'pod_self_link', pod.metadata.selfLink) + self.inventory.set_variable(container_name, 'pod_resource_version', pod.metadata.resourceVersion) + self.inventory.set_variable(container_name, 'pod_uid', pod.metadata.uid) + self.inventory.set_variable(container_name, 'container_name', container.image) + self.inventory.set_variable(container_name, 'container_image', container.image) + if container.state.running: + self.inventory.set_variable(container_name, 'container_state', 'Running') + if container.state.terminated: + self.inventory.set_variable(container_name, 'container_state', 'Terminated') + if container.state.waiting: + self.inventory.set_variable(container_name, 'container_state', 'Waiting') + self.inventory.set_variable(container_name, 'container_ready', container.ready) + self.inventory.set_variable(container_name, 'ansible_remote_tmp', '/tmp/') + self.inventory.set_variable(container_name, 'ansible_connection', self.connection_plugin) + self.inventory.set_variable(container_name, 'ansible_{0}_pod'.format(self.transport), + pod_name) + self.inventory.set_variable(container_name, 'ansible_{0}_container'.format(self.transport), + container.name) + self.inventory.set_variable(container_name, 'ansible_{0}_namespace'.format(self.transport), + namespace) + + def get_services_for_namespace(self, client, name, namespace): + v1_service = client.resources.get(api_version='v1', kind='Service') + try: + obj = v1_service.get(namespace=namespace) + except DynamicApiError as exc: + self.display.debug(exc) + raise K8sInventoryException('Error fetching Service list: %s' % format_dynamic_api_exc(exc)) 
+ + namespace_group = 'namespace_{0}'.format(namespace) + namespace_services_group = '{0}_services'.format(namespace_group) + + self.inventory.add_group(name) + self.inventory.add_group(namespace_group) + self.inventory.add_child(name, namespace_group) + self.inventory.add_group(namespace_services_group) + self.inventory.add_child(namespace_group, namespace_services_group) + + for service in obj.items: + service_name = service.metadata.name + service_labels = {} if not service.metadata.labels else dict(service.metadata.labels) + service_annotations = {} if not service.metadata.annotations else dict(service.metadata.annotations) + + self.inventory.add_host(service_name) + + if service.metadata.labels: + # create a group for each label_value + for key, value in service.metadata.labels: + group_name = 'label_{0}_{1}'.format(key, value) + self.inventory.add_group(group_name) + self.inventory.add_child(group_name, service_name) + + try: + self.inventory.add_child(namespace_services_group, service_name) + except AnsibleError: + raise + + ports = [{'name': port.name, + 'port': port.port, + 'protocol': port.protocol, + 'targetPort': port.targetPort, + 'nodePort': port.nodePort} for port in service.spec.ports or []] + + # add hostvars + self.inventory.set_variable(service_name, 'object_type', 'service') + self.inventory.set_variable(service_name, 'labels', service_labels) + self.inventory.set_variable(service_name, 'annotations', service_annotations) + self.inventory.set_variable(service_name, 'cluster_name', service.metadata.clusterName) + self.inventory.set_variable(service_name, 'ports', ports) + self.inventory.set_variable(service_name, 'type', service.spec.type) + self.inventory.set_variable(service_name, 'self_link', service.metadata.selfLink) + self.inventory.set_variable(service_name, 'resource_version', service.metadata.resourceVersion) + self.inventory.set_variable(service_name, 'uid', service.metadata.uid) + + if service.spec.externalTrafficPolicy: + 
self.inventory.set_variable(service_name, 'external_traffic_policy', + service.spec.externalTrafficPolicy) + if service.spec.externalIPs: + self.inventory.set_variable(service_name, 'external_ips', service.spec.externalIPs) + + if service.spec.externalName: + self.inventory.set_variable(service_name, 'external_name', service.spec.externalName) + + if service.spec.healthCheckNodePort: + self.inventory.set_variable(service_name, 'health_check_node_port', + service.spec.healthCheckNodePort) + if service.spec.loadBalancerIP: + self.inventory.set_variable(service_name, 'load_balancer_ip', + service.spec.loadBalancerIP) + if service.spec.selector: + self.inventory.set_variable(service_name, 'selector', dict(service.spec.selector)) + + if hasattr(service.status.loadBalancer, 'ingress') and service.status.loadBalancer.ingress: + load_balancer = [{'hostname': ingress.hostname, + 'ip': ingress.ip} for ingress in service.status.loadBalancer.ingress] + self.inventory.set_variable(service_name, 'load_balancer', load_balancer) diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/inventory/openshift.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/inventory/openshift.py new file mode 100644 index 00000000..f6c393bd --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/inventory/openshift.py @@ -0,0 +1,202 @@ +# Copyright (c) 2018 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = ''' + name: openshift + plugin_type: inventory + author: + - Chris Houseknecht <@chouseknecht> + + short_description: OpenShift inventory source + + description: + - Fetch containers, services and routes for one or more clusters + - Groups by cluster name, namespace, namespace_services, namespace_pods, namespace_routes, and labels 
+ - Uses openshift.(yml|yaml) YAML configuration file to set parameter values. + + options: + plugin: + description: token that ensures this is a source file for the 'openshift' plugin. + required: True + choices: ['openshift'] + connections: + description: + - Optional list of cluster connection settings. If no connections are provided, the default + I(~/.kube/config) and active context will be used, and objects will be returned for all namespaces + the active user is authorized to access. + suboptions: + name: + description: + - Optional name to assign to the cluster. If not provided, a name is constructed from the server + and port. + kubeconfig: + description: + - Path to an existing Kubernetes config file. If not provided, and no other connection + options are provided, the OpenShift client will attempt to load the default + configuration file from I(~/.kube/config.json). Can also be specified via K8S_AUTH_KUBECONFIG + environment variable. + context: + description: + - The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment + variable. + host: + description: + - Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable. + api_key: + description: + - Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment + variable. + username: + description: + - Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME + environment variable. + password: + description: + - Provide a password for authenticating with the API. Can also be specified via K8S_AUTH_PASSWORD + environment variable. + client_cert: + description: + - Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE + environment variable. + aliases: [ cert_file ] + client_key: + description: + - Path to a key file used to authenticate with the API. 
Can also be specified via K8S_AUTH_KEY_FILE + environment variable. + aliases: [ key_file ] + ca_cert: + description: + - Path to a CA certificate used to authenticate with the API. Can also be specified via + K8S_AUTH_SSL_CA_CERT environment variable. + aliases: [ ssl_ca_cert ] + validate_certs: + description: + - "Whether or not to verify the API server's SSL certificates. Can also be specified via + K8S_AUTH_VERIFY_SSL environment variable." + type: bool + aliases: [ verify_ssl ] + namespaces: + description: + - List of namespaces. If not specified, will fetch all containers for all namespaces user is authorized + to access. + + requirements: + - "python >= 2.7" + - "openshift >= 0.6" + - "PyYAML >= 3.11" +''' + +EXAMPLES = ''' +# File must be named openshift.yaml or openshift.yml + +# Authenticate with token, and return all pods and services for all namespaces +plugin: community.kubernetes.openshift +connections: + - host: https://192.168.64.4:8443 + api_key: xxxxxxxxxxxxxxxx + verify_ssl: false + +# Use default config (~/.kube/config) file and active context, and return objects for a specific namespace +plugin: community.kubernetes.openshift +connections: + - namespaces: + - testing + +# Use a custom config file, and a specific context. 
from ansible_collections.community.kubernetes.plugins.inventory.k8s import K8sInventoryException, InventoryModule as K8sInventoryModule, format_dynamic_api_exc

try:
    from openshift.dynamic.exceptions import DynamicApiError
except ImportError:
    # Missing client is reported by the base class when it is first needed.
    pass


class InventoryModule(K8sInventoryModule):
    """OpenShift inventory source: extends the k8s inventory plugin with
    Route objects, grouped per cluster and namespace."""

    NAME = 'community.kubernetes.openshift'

    transport = 'oc'

    def fetch_objects(self, connections):
        """Populate the inventory.

        Delegates to the k8s base implementation first (pods/services), then
        adds OpenShift Routes for each configured connection.

        :param connections: list of connection dicts from the inventory
            config file, or a falsy value to use the default client config.
        :raises K8sInventoryException: if ``connections`` is set but not a list.
        """
        super(InventoryModule, self).fetch_objects(connections)

        if not connections:
            # No explicit connections: default client settings, and every
            # namespace the authenticated user can see.
            client = self.get_api_client()
            host_name = self.get_default_host_name(client.configuration.host)
            for ns in self.get_available_namespaces(client):
                self.get_routes_for_namespace(client, host_name, ns)
            return

        if not isinstance(connections, list):
            raise K8sInventoryException("Expecting connections to be a list.")

        for conn in connections:
            client = self.get_api_client(**conn)
            host_name = conn.get('name', self.get_default_host_name(client.configuration.host))
            # A non-empty 'namespaces' entry wins; otherwise fall back to all
            # namespaces the user is authorized to access.
            namespaces = conn.get('namespaces') or self.get_available_namespaces(client)
            for ns in namespaces:
                self.get_routes_for_namespace(client, host_name, ns)

    def get_routes_for_namespace(self, client, name, namespace):
        """Add one namespace's Routes to the inventory as hosts and groups.

        :param client: openshift DynamicClient to query with.
        :param name: inventory group name representing the cluster connection.
        :param namespace: namespace whose Route objects are fetched.
        :raises K8sInventoryException: on any API error while listing Routes.
        """
        v1_route = client.resources.get(api_version='v1', kind='Route')
        try:
            obj = v1_route.get(namespace=namespace)
        except DynamicApiError as exc:
            self.display.debug(exc)
            raise K8sInventoryException('Error fetching Routes list: %s' % format_dynamic_api_exc(exc))

        namespace_group = 'namespace_{0}'.format(namespace)
        namespace_routes_group = '{0}_routes'.format(namespace_group)

        # Group hierarchy: cluster -> namespace -> namespace_routes.
        self.inventory.add_group(name)
        self.inventory.add_group(namespace_group)
        self.inventory.add_child(name, namespace_group)
        self.inventory.add_group(namespace_routes_group)
        self.inventory.add_child(namespace_group, namespace_routes_group)

        for route in obj.items:
            route_name = route.metadata.name
            annotations = {} if not route.metadata.annotations else dict(route.metadata.annotations)

            self.inventory.add_host(route_name)

            if route.metadata.labels:
                # One inventory group per label key/value pair.
                # NOTE(review): iterating the openshift ResourceField is
                # assumed to yield (key, value) pairs, mirroring the k8s
                # inventory plugin — confirm against the client version used.
                for label_key, label_value in route.metadata.labels:
                    label_group = 'label_{0}_{1}'.format(label_key, label_value)
                    self.inventory.add_group(label_group)
                    self.inventory.add_child(label_group, route_name)
                labels = dict(route.metadata.labels)
            else:
                labels = {}

            self.inventory.add_child(namespace_routes_group, route_name)

            # Host variables mirroring the Route's metadata and spec.
            self.inventory.set_variable(route_name, 'labels', labels)
            self.inventory.set_variable(route_name, 'annotations', annotations)
            self.inventory.set_variable(route_name, 'cluster_name', route.metadata.clusterName)
            self.inventory.set_variable(route_name, 'object_type', 'route')
            self.inventory.set_variable(route_name, 'self_link', route.metadata.selfLink)
            self.inventory.set_variable(route_name, 'resource_version', route.metadata.resourceVersion)
            self.inventory.set_variable(route_name, 'uid', route.metadata.uid)

            if route.spec.host:
                self.inventory.set_variable(route_name, 'host', route.spec.host)

            if route.spec.path:
                self.inventory.set_variable(route_name, 'path', route.spec.path)

            if hasattr(route.spec.port, 'targetPort') and route.spec.port.targetPort:
                self.inventory.set_variable(route_name, 'port', dict(route.spec.port))
+# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = ''' + lookup: k8s + + short_description: Query the K8s API + + description: + - Uses the OpenShift Python client to fetch a specific object by name, all matching objects within a + namespace, or all matching objects for all namespaces, as well as information about the cluster. + - Provides access the full range of K8s APIs. + - Enables authentication via config file, certificates, password or token. + + options: + cluster_info: + description: + - Use to specify the type of cluster information you are attempting to retrieve. Will take priority + over all the other options. + api_version: + description: + - Use to specify the API version. If I(resource definition) is provided, the I(apiVersion) from the + I(resource_definition) will override this option. + default: v1 + kind: + description: + - Use to specify an object model. If I(resource definition) is provided, the I(kind) from a + I(resource_definition) will override this option. + required: true + resource_name: + description: + - Fetch a specific object by name. If I(resource definition) is provided, the I(metadata.name) value + from the I(resource_definition) will override this option. 
+ namespace: + description: + - Limit the objects returned to a specific namespace. If I(resource definition) is provided, the + I(metadata.namespace) value from the I(resource_definition) will override this option. + label_selector: + description: + - Additional labels to include in the query. Ignored when I(resource_name) is provided. + field_selector: + description: + - Specific fields on which to query. Ignored when I(resource_name) is provided. + resource_definition: + description: + - "Provide a YAML configuration for an object. NOTE: I(kind), I(api_version), I(resource_name), + and I(namespace) will be overwritten by corresponding values found in the provided I(resource_definition)." + src: + description: + - "Provide a path to a file containing a valid YAML definition of an object dated. Mutually + exclusive with I(resource_definition). NOTE: I(kind), I(api_version), I(resource_name), and I(namespace) + will be overwritten by corresponding values found in the configuration read in from the I(src) file." + - Reads from the local file system. To read from the Ansible controller's file system, use the file lookup + plugin or template lookup plugin, combined with the from_yaml filter, and pass the result to + I(resource_definition). See Examples below. + host: + description: + - Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable. + api_key: + description: + - Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment variable. + kubeconfig: + description: + - Path to an existing Kubernetes config file. If not provided, and no other connection + options are provided, the openshift client will attempt to load the default + configuration file from I(~/.kube/config.json). Can also be specified via K8S_AUTH_KUBECONFIG environment + variable. + context: + description: + - The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment + variable. 
+ username: + description: + - Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME environment + variable. + password: + description: + - Provide a password for authenticating with the API. Can also be specified via K8S_AUTH_PASSWORD environment + variable. + client_cert: + description: + - Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE + environment + variable. + aliases: [ cert_file ] + client_key: + description: + - Path to a key file used to authenticate with the API. Can also be specified via K8S_AUTH_KEY_FILE environment + variable. + aliases: [ key_file ] + ca_cert: + description: + - Path to a CA certificate used to authenticate with the API. Can also be specified via K8S_AUTH_SSL_CA_CERT + environment variable. + aliases: [ ssl_ca_cert ] + validate_certs: + description: + - Whether or not to verify the API server's SSL certificates. Can also be specified via K8S_AUTH_VERIFY_SSL + environment variable. + type: bool + aliases: [ verify_ssl ] + + requirements: + - "python >= 2.7" + - "openshift >= 0.6" + - "PyYAML >= 3.11" + + notes: + - "The OpenShift Python client wraps the K8s Python client, providing full access to + all of the APIS and models available on both platforms. 
For API version details and + additional information visit https://github.com/openshift/openshift-restclient-python" +''' + +EXAMPLES = """ +- name: Fetch a list of namespaces + set_fact: + projects: "{{ lookup('community.kubernetes.k8s', api_version='v1', kind='Namespace') }}" + +- name: Fetch all deployments + set_fact: + deployments: "{{ lookup('community.kubernetes.k8s', kind='Deployment') }}" + +- name: Fetch all deployments in a namespace + set_fact: + deployments: "{{ lookup('community.kubernetes.k8s', kind='Deployment', namespace='testing') }}" + +- name: Fetch a specific deployment by name + set_fact: + deployments: "{{ lookup('community.kubernetes.k8s', kind='Deployment', namespace='testing', resource_name='elastic') }}" + +- name: Fetch with label selector + set_fact: + service: "{{ lookup('community.kubernetes.k8s', kind='Service', label_selector='app=galaxy') }}" + +# Use parameters from a YAML config + +- name: Load config from the Ansible controller filesystem + set_fact: + config: "{{ lookup('file', 'service.yml') | from_yaml }}" + +- name: Using the config (loaded from a file in prior task), fetch the latest version of the object + set_fact: + service: "{{ lookup('community.kubernetes.k8s', resource_definition=config) }}" + +- name: Use a config from the local filesystem + set_fact: + service: "{{ lookup('community.kubernetes.k8s', src='service.yml') }}" +""" + +RETURN = """ + _list: + description: + - One ore more object definitions returned from the API. + type: complex + contains: + api_version: + description: The versioned schema of this representation of an object. + returned: success + type: str + kind: + description: Represents the REST resource this object represents. + returned: success + type: str + metadata: + description: Standard object metadata. Includes name, namespace, annotations, labels, etc. + returned: success + type: complex + spec: + description: Specific attributes of the object. 
from ansible.errors import AnsibleError
from ansible.module_utils.common._collections_compat import KeysView
from ansible.plugins.lookup import LookupBase

from ansible_collections.community.kubernetes.plugins.module_utils.common import K8sAnsibleMixin


try:
    from openshift.dynamic.exceptions import NotFoundError
    HAS_K8S_MODULE_HELPER = True
    k8s_import_exception = None
except ImportError as e:
    HAS_K8S_MODULE_HELPER = False
    k8s_import_exception = e


class KubernetesLookup(K8sAnsibleMixin):
    """Implementation behind the community.kubernetes.k8s lookup plugin.

    Queries the cluster through the openshift dynamic client and returns
    plain dicts suitable for use in Jinja2 expressions.
    """

    def __init__(self):
        if not HAS_K8S_MODULE_HELPER:
            raise Exception(
                "Requires the OpenShift Python client. Try `pip install openshift`. Detail: {0}".format(k8s_import_exception)
            )

        # Per-call query parameters; populated in run() from kwargs.
        self.kind = None
        self.name = None
        self.namespace = None
        self.api_version = None
        self.label_selector = None
        self.field_selector = None
        self.include_uninitialized = None
        self.resource_definition = None
        self.helper = None
        self.connection = {}

    def fail(self, msg=None):
        # K8sAnsibleMixin calls self.fail(); in a lookup context the correct
        # failure mode is raising AnsibleError, not module exit.
        raise AnsibleError(msg)

    def run(self, terms, variables=None, **kwargs):
        """Execute the lookup.

        :param terms: unused positional lookup terms.
        :param variables: available Ansible variables (unused).
        :param kwargs: lookup options (kind, resource_name, namespace,
            label_selector, field_selector, resource_definition, src,
            cluster_info, plus auth options consumed by get_api_client).
        :returns: a one-element list containing either cluster info, a single
            object dict (when resource_name is given), or a list of object
            dicts.
        :raises AnsibleError: when no kind can be determined.
        """
        self.params = kwargs
        self.client = self.get_api_client()

        # cluster_info takes priority over any object query.
        cluster_info = kwargs.get('cluster_info')
        if cluster_info == 'version':
            return [self.client.version]
        if cluster_info == 'api_groups':
            # Older clients expose api_groups as a KeysView; normalize to list.
            if isinstance(self.client.resources.api_groups, KeysView):
                return [list(self.client.resources.api_groups)]
            return [self.client.resources.api_groups]

        self.kind = kwargs.get('kind')
        self.name = kwargs.get('resource_name')
        self.namespace = kwargs.get('namespace')
        self.api_version = kwargs.get('api_version', 'v1')
        self.label_selector = kwargs.get('label_selector')
        self.field_selector = kwargs.get('field_selector')
        self.include_uninitialized = kwargs.get('include_uninitialized', False)

        # Values found in resource_definition/src override the direct options.
        resource_definition = kwargs.get('resource_definition')
        src = kwargs.get('src')
        if src:
            resource_definition = self.load_resource_definitions(src)[0]
        if resource_definition:
            self.kind = resource_definition.get('kind', self.kind)
            self.api_version = resource_definition.get('apiVersion', self.api_version)
            self.name = resource_definition.get('metadata', {}).get('name', self.name)
            self.namespace = resource_definition.get('metadata', {}).get('namespace', self.namespace)

        if not self.kind:
            raise AnsibleError(
                "Error: no Kind specified. Use the 'kind' parameter, or provide an object YAML configuration "
                "using the 'resource_definition' parameter."
            )

        resource = self.find_resource(self.kind, self.api_version, fail=True)
        try:
            k8s_obj = resource.get(name=self.name, namespace=self.namespace, label_selector=self.label_selector, field_selector=self.field_selector)
        except NotFoundError:
            return []

        if self.name:
            return [k8s_obj.to_dict()]

        # FIX: default to an empty list so callers can always iterate the
        # result; .get('items') alone yields None when the response dict
        # carries no 'items' key.
        return k8s_obj.to_dict().get('items', [])


class LookupModule(LookupBase):
    """Ansible lookup entry point; delegates to KubernetesLookup."""

    def run(self, terms, variables=None, **kwargs):
        return KubernetesLookup().run(terms, variables=variables, **kwargs)
This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import time +import os +import traceback +import sys +from datetime import datetime +from distutils.version import LooseVersion + + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.six import iteritems, string_types +from ansible.module_utils._text import to_native +from ansible.module_utils.common.dict_transformations import dict_merge + +K8S_IMP_ERR = None +try: + import kubernetes + import openshift + from openshift.dynamic import DynamicClient + from openshift.dynamic.exceptions import ( + ResourceNotFoundError, ResourceNotUniqueError, NotFoundError, DynamicApiError, + ConflictError, ForbiddenError, MethodNotAllowedError) + HAS_K8S_MODULE_HELPER = True + k8s_import_exception = None +except ImportError as e: + HAS_K8S_MODULE_HELPER = False + k8s_import_exception = e + K8S_IMP_ERR = traceback.format_exc() + +YAML_IMP_ERR = None +try: + import yaml + HAS_YAML = True +except ImportError: + YAML_IMP_ERR = traceback.format_exc() + HAS_YAML = False + +K8S_CONFIG_HASH_IMP_ERR = None +try: + from openshift.helper.hashes import generate_hash + from openshift.dynamic.exceptions import KubernetesValidateMissing + HAS_K8S_CONFIG_HASH = True +except ImportError: + 
K8S_CONFIG_HASH_IMP_ERR = traceback.format_exc() + HAS_K8S_CONFIG_HASH = False + +HAS_K8S_APPLY = None +try: + from openshift.dynamic.apply import apply_object + HAS_K8S_APPLY = True +except ImportError: + HAS_K8S_APPLY = False + +try: + import urllib3 + urllib3.disable_warnings() +except ImportError: + pass + +try: + from openshift.dynamic.apply import recursive_diff +except ImportError: + from ansible.module_utils.common.dict_transformations import recursive_diff + +try: + try: + # >=0.10 + from openshift.dynamic.resource import ResourceInstance + except ImportError: + # <0.10 + from openshift.dynamic.client import ResourceInstance + HAS_K8S_INSTANCE_HELPER = True + k8s_import_exception = None +except ImportError as e: + HAS_K8S_INSTANCE_HELPER = False + k8s_import_exception = e + K8S_IMP_ERR = traceback.format_exc() + + +def list_dict_str(value): + if isinstance(value, (list, dict, string_types)): + return value + raise TypeError + + +ARG_ATTRIBUTES_BLACKLIST = ('property_path',) + +COMMON_ARG_SPEC = { + 'state': { + 'default': 'present', + 'choices': ['present', 'absent'], + }, + 'force': { + 'type': 'bool', + 'default': False, + }, +} + +RESOURCE_ARG_SPEC = { + 'resource_definition': { + 'type': list_dict_str, + 'aliases': ['definition', 'inline'] + }, + 'src': { + 'type': 'path', + }, +} + +NAME_ARG_SPEC = { + 'kind': {}, + 'name': {}, + 'namespace': {}, + 'api_version': { + 'default': 'v1', + 'aliases': ['api', 'version'], + }, +} + +AUTH_ARG_SPEC = { + 'kubeconfig': { + 'type': 'path', + }, + 'context': {}, + 'host': {}, + 'api_key': { + 'no_log': True, + }, + 'username': {}, + 'password': { + 'no_log': True, + }, + 'validate_certs': { + 'type': 'bool', + 'aliases': ['verify_ssl'], + }, + 'ca_cert': { + 'type': 'path', + 'aliases': ['ssl_ca_cert'], + }, + 'client_cert': { + 'type': 'path', + 'aliases': ['cert_file'], + }, + 'client_key': { + 'type': 'path', + 'aliases': ['key_file'], + }, + 'proxy': { + 'type': 'str', + }, + 'persist_config': { + 'type': 
'bool', + }, +} + +WAIT_ARG_SPEC = dict( + wait=dict(type='bool', default=False), + wait_sleep=dict(type='int', default=5), + wait_timeout=dict(type='int', default=120), + wait_condition=dict( + type='dict', + default=None, + options=dict( + type=dict(), + status=dict(default=True, choices=[True, False, "Unknown"]), + reason=dict() + ) + ) +) + +# Map kubernetes-client parameters to ansible parameters +AUTH_ARG_MAP = { + 'kubeconfig': 'kubeconfig', + 'context': 'context', + 'host': 'host', + 'api_key': 'api_key', + 'username': 'username', + 'password': 'password', + 'verify_ssl': 'validate_certs', + 'ssl_ca_cert': 'ca_cert', + 'cert_file': 'client_cert', + 'key_file': 'client_key', + 'proxy': 'proxy', + 'persist_config': 'persist_config', +} + + +class K8sAnsibleMixin(object): + + def __init__(self, *args, **kwargs): + if not HAS_K8S_MODULE_HELPER: + self.fail_json(msg=missing_required_lib('openshift'), exception=K8S_IMP_ERR, + error=to_native(k8s_import_exception)) + self.openshift_version = openshift.__version__ + + if not HAS_YAML: + self.fail_json(msg=missing_required_lib("PyYAML"), exception=YAML_IMP_ERR) + + def get_api_client(self, **auth_params): + auth_params = auth_params or getattr(self, 'params', {}) + auth = {} + + # If authorization variables aren't defined, look for them in environment variables + for true_name, arg_name in AUTH_ARG_MAP.items(): + if auth_params.get(arg_name) is None: + env_value = os.getenv('K8S_AUTH_{0}'.format(arg_name.upper()), None) or os.getenv('K8S_AUTH_{0}'.format(true_name.upper()), None) + if env_value is not None: + if AUTH_ARG_SPEC[arg_name].get('type') == 'bool': + env_value = env_value.lower() not in ['0', 'false', 'no'] + auth[true_name] = env_value + else: + auth[true_name] = auth_params[arg_name] + + def auth_set(*names): + return all([auth.get(name) for name in names]) + + if auth_set('username', 'password', 'host') or auth_set('api_key', 'host'): + # We have enough in the parameters to authenticate, no need to load 
incluster or kubeconfig + pass + elif auth_set('kubeconfig') or auth_set('context'): + try: + kubernetes.config.load_kube_config(auth.get('kubeconfig'), auth.get('context'), persist_config=auth.get('persist_config')) + except Exception as err: + self.fail(msg='Failed to load kubeconfig due to %s' % to_native(err)) + else: + # First try to do incluster config, then kubeconfig + try: + kubernetes.config.load_incluster_config() + except kubernetes.config.ConfigException: + try: + kubernetes.config.load_kube_config(auth.get('kubeconfig'), auth.get('context'), persist_config=auth.get('persist_config')) + except Exception as err: + self.fail(msg='Failed to load kubeconfig due to %s' % to_native(err)) + + # Override any values in the default configuration with Ansible parameters + configuration = kubernetes.client.Configuration() + for key, value in iteritems(auth): + if key in AUTH_ARG_MAP.keys() and value is not None: + if key == 'api_key': + setattr(configuration, key, {'authorization': "Bearer {0}".format(value)}) + else: + setattr(configuration, key, value) + + kubernetes.client.Configuration.set_default(configuration) + try: + return DynamicClient(kubernetes.client.ApiClient(configuration)) + except Exception as err: + self.fail(msg='Failed to get client due to %s' % to_native(err)) + + def find_resource(self, kind, api_version, fail=False): + for attribute in ['kind', 'name', 'singular_name']: + try: + return self.client.resources.get(**{'api_version': api_version, attribute: kind}) + except (ResourceNotFoundError, ResourceNotUniqueError): + pass + try: + return self.client.resources.get(api_version=api_version, short_names=[kind]) + except (ResourceNotFoundError, ResourceNotUniqueError): + if fail: + self.fail(msg='Failed to find exact match for {0}.{1} by [kind, name, singularName, shortNames]'.format(api_version, kind)) + + def kubernetes_facts(self, kind, api_version, name=None, namespace=None, label_selectors=None, field_selectors=None, + wait=False, 
wait_sleep=5, wait_timeout=120, state='present', condition=None): + resource = self.find_resource(kind, api_version) + if not resource: + return dict(resources=[]) + + if not label_selectors: + label_selectors = [] + if not field_selectors: + field_selectors = [] + + try: + result = resource.get(name=name, + namespace=namespace, + label_selector=','.join(label_selectors), + field_selector=','.join(field_selectors)) + if wait: + satisfied_by = [] + if isinstance(result, ResourceInstance): + # We have a list of ResourceInstance + resource_list = result.get('items', []) + if not resource_list: + resource_list = [result] + + for resource_instance in resource_list: + success, res, duration = self.wait(resource, resource_instance, + sleep=wait_sleep, timeout=wait_timeout, + state=state, condition=condition) + if not success: + self.fail(msg="Failed to gather information about %s(s) even" + " after waiting for %s seconds" % (res.get('kind'), duration)) + satisfied_by.append(res) + return dict(resources=satisfied_by) + result = result.to_dict() + except (openshift.dynamic.exceptions.BadRequestError, openshift.dynamic.exceptions.NotFoundError): + return dict(resources=[]) + + if 'items' in result: + return dict(resources=result['items']) + return dict(resources=[result]) + + def remove_aliases(self): + """ + The helper doesn't know what to do with aliased keys + """ + for k, v in iteritems(self.argspec): + if 'aliases' in v: + for alias in v['aliases']: + if alias in self.params: + self.params.pop(alias) + + def load_resource_definitions(self, src): + """ Load the requested src path """ + result = None + path = os.path.normpath(src) + if not os.path.exists(path): + self.fail(msg="Error accessing {0}. 
Does the file exist?".format(path)) + try: + with open(path, 'r') as f: + result = list(yaml.safe_load_all(f)) + except (IOError, yaml.YAMLError) as exc: + self.fail(msg="Error loading resource_definition: {0}".format(exc)) + return result + + def diff_objects(self, existing, new): + result = dict() + diff = recursive_diff(existing, new) + if not diff: + return True, result + + result['before'] = diff[0] + result['after'] = diff[1] + + # If only metadata.generation and metadata.resourceVersion changed, ignore it + ignored_keys = set(['generation', 'resourceVersion']) + + if list(result['after'].keys()) != ['metadata'] or list(result['before'].keys()) != ['metadata']: + return False, result + + if not set(result['after']['metadata'].keys()).issubset(ignored_keys): + return False, result + if not set(result['before']['metadata'].keys()).issubset(ignored_keys): + return False, result + + if hasattr(self, 'warn'): + self.warn('No meaningful diff was generated, but the API may not be idempotent (only metadata.generation or metadata.resourceVersion were changed)') + + return True, result + + def fail(self, msg=None): + self.fail_json(msg=msg) + + def _wait_for(self, resource, name, namespace, predicate, sleep, timeout, state): + start = datetime.now() + + def _wait_for_elapsed(): + return (datetime.now() - start).seconds + + response = None + while _wait_for_elapsed() < timeout: + try: + response = resource.get(name=name, namespace=namespace) + if predicate(response): + if response: + return True, response.to_dict(), _wait_for_elapsed() + return True, {}, _wait_for_elapsed() + time.sleep(sleep) + except NotFoundError: + if state == 'absent': + return True, {}, _wait_for_elapsed() + if response: + response = response.to_dict() + return False, response, _wait_for_elapsed() + + def wait(self, resource, definition, sleep, timeout, state='present', condition=None): + + def _deployment_ready(deployment): + # FIXME: frustratingly bool(deployment.status) is True even if status 
is empty + # Furthermore deployment.status.availableReplicas == deployment.status.replicas == None if status is empty + # deployment.status.replicas is None is perfectly ok if desired replicas == 0 + # Scaling up means that we also need to check that we're not in a + # situation where status.replicas == status.availableReplicas + # but spec.replicas != status.replicas + return (deployment.status + and deployment.spec.replicas == (deployment.status.replicas or 0) + and deployment.status.availableReplicas == deployment.status.replicas + and deployment.status.observedGeneration == deployment.metadata.generation + and not deployment.status.unavailableReplicas) + + def _pod_ready(pod): + return (pod.status and pod.status.containerStatuses is not None + and all([container.ready for container in pod.status.containerStatuses])) + + def _daemonset_ready(daemonset): + return (daemonset.status and daemonset.status.desiredNumberScheduled is not None + and daemonset.status.numberReady == daemonset.status.desiredNumberScheduled + and daemonset.status.observedGeneration == daemonset.metadata.generation + and not daemonset.status.unavailableReplicas) + + def _custom_condition(resource): + if not resource.status or not resource.status.conditions: + return False + match = [x for x in resource.status.conditions if x.type == condition['type']] + if not match: + return False + # There should never be more than one condition of a specific type + match = match[0] + if match.status == 'Unknown': + if match.status == condition['status']: + if 'reason' not in condition: + return True + if condition['reason']: + return match.reason == condition['reason'] + return False + status = True if match.status == 'True' else False + if status == condition['status']: + if condition.get('reason'): + return match.reason == condition['reason'] + return True + return False + + def _resource_absent(resource): + return not resource + + waiter = dict( + Deployment=_deployment_ready, + 
DaemonSet=_daemonset_ready, + Pod=_pod_ready + ) + kind = definition['kind'] + if state == 'present' and not condition: + predicate = waiter.get(kind, lambda x: x) + elif state == 'present' and condition: + predicate = _custom_condition + else: + predicate = _resource_absent + return self._wait_for(resource, definition['metadata']['name'], definition['metadata'].get('namespace'), predicate, sleep, timeout, state) + + def set_resource_definitions(self): + resource_definition = self.params.get('resource_definition') + + self.resource_definitions = [] + + if resource_definition: + if isinstance(resource_definition, string_types): + try: + self.resource_definitions = yaml.safe_load_all(resource_definition) + except (IOError, yaml.YAMLError) as exc: + self.fail(msg="Error loading resource_definition: {0}".format(exc)) + elif isinstance(resource_definition, list): + self.resource_definitions = resource_definition + else: + self.resource_definitions = [resource_definition] + + src = self.params.get('src') + if src: + self.resource_definitions = self.load_resource_definitions(src) + try: + self.resource_definitions = [item for item in self.resource_definitions if item] + except AttributeError: + pass + + if not resource_definition and not src: + implicit_definition = dict( + kind=self.kind, + apiVersion=self.api_version, + metadata=dict(name=self.name) + ) + if self.namespace: + implicit_definition['metadata']['namespace'] = self.namespace + self.resource_definitions = [implicit_definition] + + def check_library_version(self): + validate = self.params.get('validate') + if validate and LooseVersion(self.openshift_version) < LooseVersion("0.8.0"): + self.fail_json(msg="openshift >= 0.8.0 is required for validate") + self.append_hash = self.params.get('append_hash') + if self.append_hash and not HAS_K8S_CONFIG_HASH: + self.fail_json(msg=missing_required_lib("openshift >= 0.7.2", reason="for append_hash"), + exception=K8S_CONFIG_HASH_IMP_ERR) + if self.params['merge_type'] and 
LooseVersion(self.openshift_version) < LooseVersion("0.6.2"): + self.fail_json(msg=missing_required_lib("openshift >= 0.6.2", reason="for merge_type")) + self.apply = self.params.get('apply', False) + if self.apply and not HAS_K8S_APPLY: + self.fail_json(msg=missing_required_lib("openshift >= 0.9.2", reason="for apply")) + wait = self.params.get('wait', False) + if wait and not HAS_K8S_INSTANCE_HELPER: + self.fail_json(msg=missing_required_lib("openshift >= 0.4.0", reason="for wait")) + + def flatten_list_kind(self, list_resource, definitions): + flattened = [] + parent_api_version = list_resource.group_version if list_resource else None + parent_kind = list_resource.kind[:-4] if list_resource else None + for definition in definitions.get('items', []): + resource = self.find_resource(definition.get('kind', parent_kind), definition.get('apiVersion', parent_api_version), fail=True) + flattened.append((resource, self.set_defaults(resource, definition))) + return flattened + + def execute_module(self): + changed = False + results = [] + try: + self.client = self.get_api_client() + # Hopefully the kubernetes client will provide its own exception class one day + except (urllib3.exceptions.RequestError) as e: + self.fail_json(msg="Couldn't connect to Kubernetes: %s" % str(e)) + + flattened_definitions = [] + for definition in self.resource_definitions: + if definition is None: + continue + kind = definition.get('kind', self.kind) + api_version = definition.get('apiVersion', self.api_version) + if kind and kind.endswith('List'): + resource = self.find_resource(kind, api_version, fail=False) + flattened_definitions.extend(self.flatten_list_kind(resource, definition)) + else: + resource = self.find_resource(kind, api_version, fail=True) + flattened_definitions.append((resource, definition)) + + for (resource, definition) in flattened_definitions: + kind = definition.get('kind', self.kind) + api_version = definition.get('apiVersion', self.api_version) + definition = 
self.set_defaults(resource, definition) + self.warnings = [] + if self.params['validate'] is not None: + self.warnings = self.validate(definition) + result = self.perform_action(resource, definition) + result['warnings'] = self.warnings + changed = changed or result['changed'] + results.append(result) + + if len(results) == 1: + self.exit_json(**results[0]) + + self.exit_json(**{ + 'changed': changed, + 'result': { + 'results': results + } + }) + + def validate(self, resource): + def _prepend_resource_info(resource, msg): + return "%s %s: %s" % (resource['kind'], resource['metadata']['name'], msg) + + try: + warnings, errors = self.client.validate(resource, self.params['validate'].get('version'), self.params['validate'].get('strict')) + except KubernetesValidateMissing: + self.fail_json(msg="kubernetes-validate python library is required to validate resources") + + if errors and self.params['validate']['fail_on_error']: + self.fail_json(msg="\n".join([_prepend_resource_info(resource, error) for error in errors])) + else: + return [_prepend_resource_info(resource, msg) for msg in warnings + errors] + + def set_defaults(self, resource, definition): + definition['kind'] = resource.kind + definition['apiVersion'] = resource.group_version + metadata = definition.get('metadata', {}) + if self.name and not metadata.get('name'): + metadata['name'] = self.name + if resource.namespaced and self.namespace and not metadata.get('namespace'): + metadata['namespace'] = self.namespace + definition['metadata'] = metadata + return definition + + def perform_action(self, resource, definition): + result = {'changed': False, 'result': {}} + state = self.params.get('state', None) + force = self.params.get('force', False) + name = definition['metadata'].get('name') + namespace = definition['metadata'].get('namespace') + existing = None + wait = self.params.get('wait') + wait_sleep = self.params.get('wait_sleep') + wait_timeout = self.params.get('wait_timeout') + wait_condition = None + 
if self.params.get('wait_condition') and self.params['wait_condition'].get('type'): + wait_condition = self.params['wait_condition'] + + self.remove_aliases() + + try: + # ignore append_hash for resources other than ConfigMap and Secret + if self.append_hash and definition['kind'] in ['ConfigMap', 'Secret']: + name = '%s-%s' % (name, generate_hash(definition)) + definition['metadata']['name'] = name + params = dict(name=name) + if namespace: + params['namespace'] = namespace + existing = resource.get(**params) + except (NotFoundError, MethodNotAllowedError): + # Remove traceback so that it doesn't show up in later failures + try: + sys.exc_clear() + except AttributeError: + # no sys.exc_clear on python3 + pass + except ForbiddenError as exc: + if definition['kind'] in ['Project', 'ProjectRequest'] and state != 'absent': + return self.create_project_request(definition) + self.fail_json(msg='Failed to retrieve requested object: {0}'.format(exc.body), + error=exc.status, status=exc.status, reason=exc.reason) + except DynamicApiError as exc: + self.fail_json(msg='Failed to retrieve requested object: {0}'.format(exc.body), + error=exc.status, status=exc.status, reason=exc.reason) + except Exception as exc: + self.fail_json(msg='Failed to retrieve requested object: {0}'.format(to_native(exc)), + error='', status='', reason='') + + if state == 'absent': + result['method'] = "delete" + if not existing: + # The object already does not exist + return result + else: + # Delete the object + result['changed'] = True + if not self.check_mode: + try: + k8s_obj = resource.delete(**params) + result['result'] = k8s_obj.to_dict() + except DynamicApiError as exc: + self.fail_json(msg="Failed to delete object: {0}".format(exc.body), + error=exc.status, status=exc.status, reason=exc.reason) + if wait: + success, resource, duration = self.wait(resource, definition, wait_sleep, wait_timeout, 'absent') + result['duration'] = duration + if not success: + self.fail_json(msg="Resource 
deletion timed out", **result) + return result + else: + if self.apply: + if self.check_mode: + ignored, patch = apply_object(resource, definition) + if existing: + k8s_obj = dict_merge(existing.to_dict(), patch) + else: + k8s_obj = patch + else: + try: + k8s_obj = resource.apply(definition, namespace=namespace).to_dict() + except DynamicApiError as exc: + msg = "Failed to apply object: {0}".format(exc.body) + if self.warnings: + msg += "\n" + "\n ".join(self.warnings) + self.fail_json(msg=msg, error=exc.status, status=exc.status, reason=exc.reason) + success = True + result['result'] = k8s_obj + if wait and not self.check_mode: + success, result['result'], result['duration'] = self.wait(resource, definition, wait_sleep, wait_timeout, condition=wait_condition) + if existing: + existing = existing.to_dict() + else: + existing = {} + match, diffs = self.diff_objects(existing, result['result']) + result['changed'] = not match + result['diff'] = diffs + result['method'] = 'apply' + if not success: + self.fail_json(msg="Resource apply timed out", **result) + return result + + if not existing: + if self.check_mode: + k8s_obj = definition + else: + try: + k8s_obj = resource.create(definition, namespace=namespace).to_dict() + except ConflictError: + # Some resources, like ProjectRequests, can't be created multiple times, + # because the resources that they create don't match their kind + # In this case we'll mark it as unchanged and warn the user + self.warn("{0} was not found, but creating it returned a 409 Conflict error. 
This can happen \ + if the resource you are creating does not directly create a resource of the same kind.".format(name)) + return result + except DynamicApiError as exc: + msg = "Failed to create object: {0}".format(exc.body) + if self.warnings: + msg += "\n" + "\n ".join(self.warnings) + self.fail_json(msg=msg, error=exc.status, status=exc.status, reason=exc.reason) + success = True + result['result'] = k8s_obj + if wait and not self.check_mode: + success, result['result'], result['duration'] = self.wait(resource, definition, wait_sleep, wait_timeout, condition=wait_condition) + result['changed'] = True + result['method'] = 'create' + if not success: + self.fail_json(msg="Resource creation timed out", **result) + return result + + match = False + diffs = [] + + if existing and force: + if self.check_mode: + k8s_obj = definition + else: + try: + k8s_obj = resource.replace(definition, name=name, namespace=namespace, append_hash=self.append_hash).to_dict() + except DynamicApiError as exc: + msg = "Failed to replace object: {0}".format(exc.body) + if self.warnings: + msg += "\n" + "\n ".join(self.warnings) + self.fail_json(msg=msg, error=exc.status, status=exc.status, reason=exc.reason) + match, diffs = self.diff_objects(existing.to_dict(), k8s_obj) + success = True + result['result'] = k8s_obj + if wait and not self.check_mode: + success, result['result'], result['duration'] = self.wait(resource, definition, wait_sleep, wait_timeout, condition=wait_condition) + match, diffs = self.diff_objects(existing.to_dict(), result['result']) + result['changed'] = not match + result['method'] = 'replace' + result['diff'] = diffs + if not success: + self.fail_json(msg="Resource replacement timed out", **result) + return result + + # Differences exist between the existing obj and requested params + if self.check_mode: + k8s_obj = dict_merge(existing.to_dict(), definition) + else: + if LooseVersion(self.openshift_version) < LooseVersion("0.6.2"): + k8s_obj, error = 
self.patch_resource(resource, definition, existing, name, + namespace) + else: + for merge_type in self.params['merge_type'] or ['strategic-merge', 'merge']: + k8s_obj, error = self.patch_resource(resource, definition, existing, name, + namespace, merge_type=merge_type) + if not error: + break + if error: + self.fail_json(**error) + + success = True + result['result'] = k8s_obj + if wait and not self.check_mode: + success, result['result'], result['duration'] = self.wait(resource, definition, wait_sleep, wait_timeout, condition=wait_condition) + match, diffs = self.diff_objects(existing.to_dict(), result['result']) + result['changed'] = not match + result['method'] = 'patch' + result['diff'] = diffs + + if not success: + self.fail_json(msg="Resource update timed out", **result) + return result + + def patch_resource(self, resource, definition, existing, name, namespace, merge_type=None): + try: + params = dict(name=name, namespace=namespace) + if merge_type: + params['content_type'] = 'application/{0}-patch+json'.format(merge_type) + k8s_obj = resource.patch(definition, **params).to_dict() + match, diffs = self.diff_objects(existing.to_dict(), k8s_obj) + error = {} + return k8s_obj, {} + except DynamicApiError as exc: + msg = "Failed to patch object: {0}".format(exc.body) + if self.warnings: + msg += "\n" + "\n ".join(self.warnings) + error = dict(msg=msg, error=exc.status, status=exc.status, reason=exc.reason, warnings=self.warnings) + return None, error + except Exception as exc: + msg = "Failed to patch object: {0}".format(exc) + if self.warnings: + msg += "\n" + "\n ".join(self.warnings) + error = dict(msg=msg, error=to_native(exc), status='', reason='', warnings=self.warnings) + return None, error + + def create_project_request(self, definition): + definition['kind'] = 'ProjectRequest' + result = {'changed': False, 'result': {}} + resource = self.find_resource('ProjectRequest', definition['apiVersion'], fail=True) + if not self.check_mode: + try: + k8s_obj = 
resource.create(definition) + result['result'] = k8s_obj.to_dict() + except DynamicApiError as exc: + self.fail_json(msg="Failed to create object: {0}".format(exc.body), + error=exc.status, status=exc.status, reason=exc.reason) + result['changed'] = True + result['method'] = 'create' + return result + + +class KubernetesAnsibleModule(AnsibleModule, K8sAnsibleMixin): + # NOTE: This class KubernetesAnsibleModule is deprecated in favor of + # class K8sAnsibleMixin and will be removed 2.0.0 release. + # Please use K8sAnsibleMixin instead. + + def __init__(self, *args, **kwargs): + kwargs['argument_spec'] = self.argspec + AnsibleModule.__init__(self, *args, **kwargs) + K8sAnsibleMixin.__init__(self, *args, **kwargs) + + self.warn("class KubernetesAnsibleModule is deprecated" + " and will be removed in 2.0.0. Please use K8sAnsibleMixin instead.") diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/module_utils/raw.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/module_utils/raw.py new file mode 100644 index 00000000..a353f1cb --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/module_utils/raw.py @@ -0,0 +1,97 @@ +# +# Copyright 2018 Red Hat | Ansible +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
+ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import copy + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.kubernetes.plugins.module_utils.common import ( + K8sAnsibleMixin, AUTH_ARG_SPEC, COMMON_ARG_SPEC, RESOURCE_ARG_SPEC, NAME_ARG_SPEC) + + +class KubernetesRawModule(K8sAnsibleMixin): + # NOTE: This class KubernetesRawModule is deprecated in favor of + # class K8sAnsibleMixin and will be removed 2.0.0 release. + # Please use K8sAnsibleMixin instead. + @property + def validate_spec(self): + return dict( + fail_on_error=dict(type='bool'), + version=dict(), + strict=dict(type='bool', default=True) + ) + + @property + def condition_spec(self): + return dict( + type=dict(), + status=dict(default=True, choices=[True, False, "Unknown"]), + reason=dict() + ) + + @property + def argspec(self): + argument_spec = copy.deepcopy(COMMON_ARG_SPEC) + argument_spec.update(copy.deepcopy(NAME_ARG_SPEC)) + argument_spec.update(copy.deepcopy(RESOURCE_ARG_SPEC)) + argument_spec.update(copy.deepcopy(AUTH_ARG_SPEC)) + argument_spec['merge_type'] = dict(type='list', elements='str', choices=['json', 'merge', 'strategic-merge']) + argument_spec['wait'] = dict(type='bool', default=False) + argument_spec['wait_sleep'] = dict(type='int', default=5) + argument_spec['wait_timeout'] = dict(type='int', default=120) + argument_spec['wait_condition'] = dict(type='dict', default=None, options=self.condition_spec) + argument_spec['validate'] = dict(type='dict', default=None, options=self.validate_spec) + argument_spec['append_hash'] = dict(type='bool', default=False) + argument_spec['apply'] = dict(type='bool', default=False) + return argument_spec + + def __init__(self, k8s_kind=None, *args, **kwargs): + mutually_exclusive = [ + ('resource_definition', 'src'), + ('merge_type', 'apply'), + ] + + module = AnsibleModule( + argument_spec=self.argspec, + mutually_exclusive=mutually_exclusive, + 
supports_check_mode=True, + ) + + self.module = module + self.check_mode = self.module.check_mode + self.params = self.module.params + self.fail_json = self.module.fail_json + self.fail = self.module.fail_json + self.exit_json = self.module.exit_json + + self.module.warn("class KubernetesRawModule is deprecated" + " and will be removed in 2.0.0. Please use K8sAnsibleMixin instead.") + super(KubernetesRawModule, self).__init__(*args, **kwargs) + + self.client = None + self.warnings = [] + + self.kind = k8s_kind or self.params.get('kind') + self.api_version = self.params.get('api_version') + self.name = self.params.get('name') + self.namespace = self.params.get('namespace') + + self.check_library_version() + self.set_resource_definitions() diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/module_utils/scale.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/module_utils/scale.py new file mode 100644 index 00000000..55bab010 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/module_utils/scale.py @@ -0,0 +1,166 @@ +# +# Copyright 2018 Red Hat | Ansible +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
+ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import copy + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.kubernetes.plugins.module_utils.common import ( + AUTH_ARG_SPEC, RESOURCE_ARG_SPEC, NAME_ARG_SPEC, K8sAnsibleMixin) + +try: + from openshift.dynamic.exceptions import NotFoundError +except ImportError: + pass + + +SCALE_ARG_SPEC = { + 'replicas': {'type': 'int', 'required': True}, + 'current_replicas': {'type': 'int'}, + 'resource_version': {}, + 'wait': {'type': 'bool', 'default': True}, + 'wait_timeout': {'type': 'int', 'default': 20}, +} + + +class KubernetesAnsibleScaleModule(K8sAnsibleMixin): + + def __init__(self, k8s_kind=None, *args, **kwargs): + self.client = None + self.warnings = [] + + mutually_exclusive = [ + ('resource_definition', 'src'), + ] + + module = AnsibleModule( + argument_spec=self.argspec, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True, + ) + + self.module = module + self.params = self.module.params + self.check_mode = self.module.check_mode + self.fail_json = self.module.fail_json + self.fail = self.module.fail_json + self.exit_json = self.module.exit_json + super(KubernetesAnsibleScaleModule, self).__init__() + + self.kind = k8s_kind or self.params.get('kind') + self.api_version = self.params.get('api_version') + self.name = self.params.get('name') + self.namespace = self.params.get('namespace') + self.set_resource_definitions() + + def execute_module(self): + definition = self.resource_definitions[0] + + self.client = self.get_api_client() + + name = definition['metadata']['name'] + namespace = definition['metadata'].get('namespace') + api_version = definition['apiVersion'] + kind = definition['kind'] + current_replicas = self.params.get('current_replicas') + replicas = self.params.get('replicas') + resource_version = self.params.get('resource_version') + + wait = self.params.get('wait') + wait_time = 
self.params.get('wait_timeout') + existing = None + existing_count = None + return_attributes = dict(changed=False, result=dict(), diff=dict()) + if wait: + return_attributes['duration'] = 0 + + resource = self.find_resource(kind, api_version, fail=True) + + try: + existing = resource.get(name=name, namespace=namespace) + return_attributes['result'] = existing.to_dict() + except NotFoundError as exc: + self.fail_json(msg='Failed to retrieve requested object: {0}'.format(exc), + error=exc.value.get('status')) + + if self.kind == 'job': + existing_count = existing.spec.parallelism + elif hasattr(existing.spec, 'replicas'): + existing_count = existing.spec.replicas + + if existing_count is None: + self.fail_json(msg='Failed to retrieve the available count for the requested object.') + + if resource_version and resource_version != existing.metadata.resourceVersion: + self.exit_json(**return_attributes) + + if current_replicas is not None and existing_count != current_replicas: + self.exit_json(**return_attributes) + + if existing_count != replicas: + return_attributes['changed'] = True + if not self.check_mode: + if self.kind == 'job': + existing.spec.parallelism = replicas + return_attributes['result'] = resource.patch(existing.to_dict()).to_dict() + else: + return_attributes = self.scale(resource, existing, replicas, wait, wait_time) + + self.exit_json(**return_attributes) + + @property + def argspec(self): + args = copy.deepcopy(SCALE_ARG_SPEC) + args.update(RESOURCE_ARG_SPEC) + args.update(NAME_ARG_SPEC) + args.update(AUTH_ARG_SPEC) + return args + + def scale(self, resource, existing_object, replicas, wait, wait_time): + name = existing_object.metadata.name + namespace = existing_object.metadata.namespace + kind = existing_object.kind + + if not hasattr(resource, 'scale'): + self.fail_json( + msg="Cannot perform scale on resource of kind {0}".format(resource.kind) + ) + + scale_obj = {'kind': kind, 'metadata': {'name': name, 'namespace': namespace}, 'spec': 
{'replicas': replicas}} + + existing = resource.get(name=name, namespace=namespace) + + try: + resource.scale.patch(body=scale_obj) + except Exception as exc: + self.fail_json(msg="Scale request failed: {0}".format(exc)) + + k8s_obj = resource.get(name=name, namespace=namespace).to_dict() + match, diffs = self.diff_objects(existing.to_dict(), k8s_obj) + result = dict() + result['result'] = k8s_obj + result['changed'] = not match + result['diff'] = diffs + + if wait: + success, result['result'], result['duration'] = self.wait(resource, scale_obj, 5, wait_time) + if not success: + self.fail_json(msg="Resource scaling timed out", **result) + return result diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/__init__.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/__init__.py diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/helm.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/helm.py new file mode 100644 index 00000000..9df885c9 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/helm.py @@ -0,0 +1,573 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: helm + +short_description: Manages Kubernetes packages with the Helm package manager + +version_added: "0.11.0" + +author: + - Lucas Boisserie (@LucasBoisserie) + - Matthieu Diehr (@d-matt) + +requirements: + - "helm (https://github.com/helm/helm/releases)" + - "yaml 
(https://pypi.org/project/PyYAML/)" + +description: + - Install, upgrade, delete packages with the Helm package manager. + +options: + chart_ref: + description: + - chart_reference on chart repository. + - path to a packaged chart. + - path to an unpacked chart directory. + - absolute URL. + - Required when I(release_state) is set to C(present). + required: false + type: path + chart_repo_url: + description: + - Chart repository URL where to locate the requested chart. + required: false + type: str + chart_version: + description: + - Chart version to install. If this is not specified, the latest version is installed. + required: false + type: str + release_name: + description: + - Release name to manage. + required: true + type: str + aliases: [ name ] + release_namespace: + description: + - Kubernetes namespace where the chart should be installed. + required: true + type: str + aliases: [ namespace ] + release_state: + choices: ['present', 'absent'] + description: + - Desirated state of release. + required: false + default: present + aliases: [ state ] + type: str + release_values: + description: + - Value to pass to chart. + required: false + default: {} + aliases: [ values ] + type: dict + values_files: + description: + - Value files to pass to chart. + - Paths will be read from the target host's filesystem, not the host running ansible. + - values_files option is evaluated before values option if both are used. + - Paths are evaluated in the order the paths are specified. + required: false + default: [] + type: list + elements: str + version_added: '1.1.0' + update_repo_cache: + description: + - Run C(helm repo update) before the operation. Can be run as part of the package installation or as a separate step. + default: false + type: bool + +#Helm options + disable_hook: + description: + - Helm option to disable hook on install/upgrade/delete. + default: False + type: bool + force: + description: + - Helm option to force reinstall, ignore on new install. 
+ default: False + type: bool + purge: + description: + - Remove the release from the store and make its name free for later use. + default: True + type: bool + wait: + description: + - Wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment are in a ready state before marking the release as successful. + default: False + type: bool + wait_timeout: + description: + - Timeout when wait option is enabled (helm2 is a number of seconds, helm3 is a duration). + type: str + atomic: + description: + - If set, the installation process deletes the installation on failure. + type: bool + default: False + create_namespace: + description: + - Create the release namespace if not present. + type: bool + default: False + version_added: "0.11.1" + replace: + description: + - Reuse the given name, only if that name is a deleted release which remains in the history. + - This is unsafe in production environment. + type: bool + default: False + version_added: "1.11.0" +extends_documentation_fragment: + - community.kubernetes.helm_common_options +''' + +EXAMPLES = r''' +- name: Deploy latest version of Prometheus chart inside monitoring namespace (and create it) + community.kubernetes.helm: + name: test + chart_ref: stable/prometheus + release_namespace: monitoring + create_namespace: true + +# From repository +- name: Add stable chart repo + community.kubernetes.helm_repository: + name: stable + repo_url: "https://kubernetes-charts.storage.googleapis.com" + +- name: Deploy latest version of Grafana chart inside monitoring namespace with values + community.kubernetes.helm: + name: test + chart_ref: stable/grafana + release_namespace: monitoring + values: + replicas: 2 + +- name: Deploy Grafana chart on 5.0.12 with values loaded from template + community.kubernetes.helm: + name: test + chart_ref: stable/grafana + chart_version: 5.0.12 + values: "{{ lookup('template', 'somefile.yaml') | from_yaml }}" + +- name: Deploy Grafana chart using values files on target + 
community.kubernetes.helm: + name: test + chart_ref: stable/grafana + release_namespace: monitoring + values_files: + - /path/to/values.yaml + +- name: Remove test release and waiting suppression ending + community.kubernetes.helm: + name: test + state: absent + wait: true + +# From git +- name: Git clone stable repo on HEAD + ansible.builtin.git: + repo: "http://github.com/helm/charts.git" + dest: /tmp/helm_repo + +- name: Deploy Grafana chart from local path + community.kubernetes.helm: + name: test + chart_ref: /tmp/helm_repo/stable/grafana + release_namespace: monitoring + +# From url +- name: Deploy Grafana chart on 5.0.12 from url + community.kubernetes.helm: + name: test + chart_ref: "https://kubernetes-charts.storage.googleapis.com/grafana-5.0.12.tgz" + release_namespace: monitoring +''' + +RETURN = r""" +status: + type: complex + description: A dictionary of status output + returned: on success Creation/Upgrade/Already deploy + contains: + appversion: + type: str + returned: always + description: Version of app deployed + chart: + type: str + returned: always + description: Chart name and chart version + name: + type: str + returned: always + description: Name of the release + namespace: + type: str + returned: always + description: Namespace where the release is deployed + revision: + type: str + returned: always + description: Number of time where the release has been updated + status: + type: str + returned: always + description: Status of release (can be DEPLOYED, FAILED, ...) 
+ updated: + type: str + returned: always + description: The Date of last update + values: + type: str + returned: always + description: Dict of Values used to deploy +stdout: + type: str + description: Full `helm` command stdout, in case you want to display it or examine the event log + returned: always + sample: '' +stderr: + type: str + description: Full `helm` command stderr, in case you want to display it or examine the event log + returned: always + sample: '' +command: + type: str + description: Full `helm` command built by this module, in case you want to re-run the command outside the module or debug a problem. + returned: always + sample: helm upgrade ... +""" + +import tempfile +import traceback + +try: + import yaml + IMP_YAML = True +except ImportError: + IMP_YAML_ERR = traceback.format_exc() + IMP_YAML = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib, env_fallback + +module = None + + +def exec_command(command): + rc, out, err = module.run_command(command) + if rc != 0: + module.fail_json( + msg="Failure when executing Helm command. 
Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, out, err), + stdout=out, + stderr=err, + command=command, + ) + return rc, out, err + + +def get_values(command, release_name): + """ + Get Values from deployed release + """ + + get_command = command + " get values --output=yaml " + release_name + + rc, out, err = exec_command(get_command) + # Helm 3 return "null" string when no values are set + if out.rstrip("\n") == "null": + return {} + return yaml.safe_load(out) + + +def get_release(state, release_name): + """ + Get Release from all deployed releases + """ + + if state is not None: + for release in state: + if release['name'] == release_name: + return release + return None + + +def get_release_status(command, release_name): + """ + Get Release state from deployed release + """ + + list_command = command + " list --output=yaml --filter " + release_name + + rc, out, err = exec_command(list_command) + + release = get_release(yaml.safe_load(out), release_name) + + if release is None: # not install + return None + + release['values'] = get_values(command, release_name) + + return release + + +def run_repo_update(command): + """ + Run Repo update + """ + repo_update_command = command + " repo update" + rc, out, err = exec_command(repo_update_command) + + +def fetch_chart_info(command, chart_ref): + """ + Get chart info + """ + inspect_command = command + " show chart " + chart_ref + + rc, out, err = exec_command(inspect_command) + + return yaml.safe_load(out) + + +def deploy(command, release_name, release_values, chart_name, wait, + wait_timeout, disable_hook, force, values_files, atomic=False, + create_namespace=False, replace=False): + """ + Install/upgrade/rollback release chart + """ + if replace: + # '--replace' is not supported by 'upgrade -i' + deploy_command = command + " install" + else: + deploy_command = command + " upgrade -i" # install/upgrade + + # Always reset values to keep release_values equal to values released + deploy_command += " --reset-values" 
+ + if wait: + deploy_command += " --wait" + if wait_timeout is not None: + deploy_command += " --timeout " + wait_timeout + + if atomic: + deploy_command += " --atomic" + + if force: + deploy_command += " --force" + + if replace: + deploy_command += " --replace" + + if disable_hook: + deploy_command += " --no-hooks" + + if create_namespace: + deploy_command += " --create-namespace" + + if values_files: + for value_file in values_files: + deploy_command += " --values=" + value_file + + if release_values != {}: + fd, path = tempfile.mkstemp(suffix='.yml') + with open(path, 'w') as yaml_file: + yaml.dump(release_values, yaml_file, default_flow_style=False) + deploy_command += " -f=" + path + + deploy_command += " " + release_name + " " + chart_name + + return deploy_command + + +def delete(command, release_name, purge, disable_hook): + """ + Delete release chart + """ + + delete_command = command + " uninstall " + + if not purge: + delete_command += " --keep-history" + + if disable_hook: + delete_command += " --no-hooks" + + delete_command += " " + release_name + + return delete_command + + +def main(): + global module + module = AnsibleModule( + argument_spec=dict( + binary_path=dict(type='path'), + chart_ref=dict(type='path'), + chart_repo_url=dict(type='str'), + chart_version=dict(type='str'), + release_name=dict(type='str', required=True, aliases=['name']), + release_namespace=dict(type='str', required=True, aliases=['namespace']), + release_state=dict(default='present', choices=['present', 'absent'], aliases=['state']), + release_values=dict(type='dict', default={}, aliases=['values']), + values_files=dict(type='list', default=[], elements='str'), + update_repo_cache=dict(type='bool', default=False), + + # Helm options + disable_hook=dict(type='bool', default=False), + force=dict(type='bool', default=False), + kube_context=dict(type='str', aliases=['context'], fallback=(env_fallback, ['K8S_AUTH_CONTEXT'])), + kubeconfig_path=dict(type='path', 
aliases=['kubeconfig'], fallback=(env_fallback, ['K8S_AUTH_KUBECONFIG'])), + purge=dict(type='bool', default=True), + wait=dict(type='bool', default=False), + wait_timeout=dict(type='str'), + atomic=dict(type='bool', default=False), + create_namespace=dict(type='bool', default=False), + replace=dict(type='bool', default=False), + ), + required_if=[ + ('release_state', 'present', ['release_name', 'chart_ref']), + ('release_state', 'absent', ['release_name']) + ], + supports_check_mode=True, + ) + + if not IMP_YAML: + module.fail_json(msg=missing_required_lib("yaml"), exception=IMP_YAML_ERR) + + changed = False + + bin_path = module.params.get('binary_path') + chart_ref = module.params.get('chart_ref') + chart_repo_url = module.params.get('chart_repo_url') + chart_version = module.params.get('chart_version') + release_name = module.params.get('release_name') + release_namespace = module.params.get('release_namespace') + release_state = module.params.get('release_state') + release_values = module.params.get('release_values') + values_files = module.params.get('values_files') + update_repo_cache = module.params.get('update_repo_cache') + + # Helm options + disable_hook = module.params.get('disable_hook') + force = module.params.get('force') + kube_context = module.params.get('kube_context') + kubeconfig_path = module.params.get('kubeconfig_path') + purge = module.params.get('purge') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + atomic = module.params.get('atomic') + create_namespace = module.params.get('create_namespace') + replace = module.params.get('replace') + + if bin_path is not None: + helm_cmd_common = bin_path + else: + helm_cmd_common = module.get_bin_path('helm', required=True) + + if kube_context is not None: + helm_cmd_common += " --kube-context " + kube_context + + if kubeconfig_path is not None: + helm_cmd_common += " --kubeconfig " + kubeconfig_path + + if update_repo_cache: + run_repo_update(helm_cmd_common) + 
+ helm_cmd_common += " --namespace=" + release_namespace + + # Get real/deployed release status + release_status = get_release_status(helm_cmd_common, release_name) + + # keep helm_cmd_common for get_release_status in module_exit_json + helm_cmd = helm_cmd_common + if release_state == "absent" and release_status is not None: + if replace: + module.fail_json(msg="replace is not applicable when state is absent") + + helm_cmd = delete(helm_cmd, release_name, purge, disable_hook) + changed = True + elif release_state == "present": + + if chart_version is not None: + helm_cmd += " --version=" + chart_version + + if chart_repo_url is not None: + helm_cmd += " --repo=" + chart_repo_url + + # Fetch chart info to have real version and real name for chart_ref from archive, folder or url + chart_info = fetch_chart_info(helm_cmd, chart_ref) + + if release_status is None: # Not installed + helm_cmd = deploy(helm_cmd, release_name, release_values, chart_ref, wait, wait_timeout, + disable_hook, False, values_files=values_files, atomic=atomic, + create_namespace=create_namespace, replace=replace) + changed = True + + else: + # the 'appVersion' specification is optional in a chart + chart_app_version = chart_info.get('appVersion', None) + released_app_version = release_status.get('app_version', None) + + # when deployed without an 'appVersion' chart value the 'helm list' command will return the entry `app_version: ""` + appversion_is_same = (chart_app_version == released_app_version) or (chart_app_version is None and released_app_version == "") + + if force or release_values != release_status['values'] \ + or (chart_info['name'] + '-' + chart_info['version']) != release_status["chart"] \ + or not appversion_is_same: + helm_cmd = deploy(helm_cmd, release_name, release_values, chart_ref, wait, wait_timeout, + disable_hook, force, values_files=values_files, atomic=atomic, + create_namespace=create_namespace, replace=replace) + changed = True + + if module.check_mode: + check_status = 
{'values': { + "current": release_status['values'], + "declared": release_values + }} + + module.exit_json( + changed=changed, + command=helm_cmd, + status=check_status, + stdout='', + stderr='', + ) + elif not changed: + module.exit_json( + changed=False, + status=release_status, + stdout='', + stderr='', + command=helm_cmd, + ) + + rc, out, err = exec_command(helm_cmd) + + module.exit_json( + changed=changed, + stdout=out, + stderr=err, + status=get_release_status(helm_cmd_common, release_name), + command=helm_cmd, + ) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/helm_info.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/helm_info.py new file mode 100644 index 00000000..03ebdde3 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/helm_info.py @@ -0,0 +1,205 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2020, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: helm_info + +short_description: Get information from Helm package deployed inside the cluster + +version_added: "0.11.0" + +author: + - Lucas Boisserie (@LucasBoisserie) + +requirements: + - "helm (https://github.com/helm/helm/releases)" + - "yaml (https://pypi.org/project/PyYAML/)" + +description: + - Get information (values, states, ...) from Helm package deployed inside the cluster. + +options: + release_name: + description: + - Release name to manage. + required: true + type: str + aliases: [ name ] + release_namespace: + description: + - Kubernetes namespace where the chart should be installed. 
+ required: true + type: str + aliases: [ namespace ] +extends_documentation_fragment: + - community.kubernetes.helm_common_options +''' + +EXAMPLES = r''' +- name: Deploy latest version of Grafana chart inside monitoring namespace + community.kubernetes.helm_info: + name: test + release_namespace: monitoring +''' + +RETURN = r''' +status: + type: complex + description: A dictionary of status output + returned: only when release exists + contains: + appversion: + type: str + returned: always + description: Version of app deployed + chart: + type: str + returned: always + description: Chart name and chart version + name: + type: str + returned: always + description: Name of the release + namespace: + type: str + returned: always + description: Namespace where the release is deployed + revision: + type: str + returned: always + description: Number of time where the release has been updated + status: + type: str + returned: always + description: Status of release (can be DEPLOYED, FAILED, ...) + updated: + type: str + returned: always + description: The Date of last update + values: + type: str + returned: always + description: Dict of Values used to deploy +''' + +import traceback + +try: + import yaml + IMP_YAML = True +except ImportError: + IMP_YAML_ERR = traceback.format_exc() + IMP_YAML = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib, env_fallback + +module = None + + +# Get Values from deployed release +def get_values(command, release_name): + get_command = command + " get values --output=yaml " + release_name + + rc, out, err = module.run_command(get_command) + + if rc != 0: + module.fail_json( + msg="Failure when executing Helm command. 
Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, out, err), + command=get_command + ) + + # Helm 3 return "null" string when no values are set + if out.rstrip("\n") == "null": + return {} + else: + return yaml.safe_load(out) + + +# Get Release from all deployed releases +def get_release(state, release_name): + if state is not None: + for release in state: + if release['name'] == release_name: + return release + return None + + +# Get Release state from deployed release +def get_release_status(command, release_name): + list_command = command + " list --output=yaml --filter " + release_name + + rc, out, err = module.run_command(list_command) + + if rc != 0: + module.fail_json( + msg="Failure when executing Helm command. Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, out, err), + command=list_command + ) + + release = get_release(yaml.safe_load(out), release_name) + + if release is None: # not install + return None + + release['values'] = get_values(command, release_name) + + return release + + +def main(): + global module + + module = AnsibleModule( + argument_spec=dict( + binary_path=dict(type='path'), + release_name=dict(type='str', required=True, aliases=['name']), + release_namespace=dict(type='str', required=True, aliases=['namespace']), + + # Helm options + kube_context=dict(type='str', aliases=['context'], fallback=(env_fallback, ['K8S_AUTH_CONTEXT'])), + kubeconfig_path=dict(type='path', aliases=['kubeconfig'], fallback=(env_fallback, ['K8S_AUTH_KUBECONFIG'])), + ), + supports_check_mode=True, + ) + + if not IMP_YAML: + module.fail_json(msg=missing_required_lib("yaml"), exception=IMP_YAML_ERR) + + bin_path = module.params.get('binary_path') + release_name = module.params.get('release_name') + release_namespace = module.params.get('release_namespace') + + # Helm options + kube_context = module.params.get('kube_context') + kubeconfig_path = module.params.get('kubeconfig_path') + + if bin_path is not None: + helm_cmd_common = bin_path + else: + 
helm_cmd_common = module.get_bin_path('helm', required=True) + + if kube_context is not None: + helm_cmd_common += " --kube-context " + kube_context + + if kubeconfig_path is not None: + helm_cmd_common += " --kubeconfig " + kubeconfig_path + + helm_cmd_common += " --namespace=" + release_namespace + + release_status = get_release_status(helm_cmd_common, release_name) + + if release_status is not None: + module.exit_json(changed=False, status=release_status) + + module.exit_json(changed=False) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/helm_plugin.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/helm_plugin.py new file mode 100644 index 00000000..e5e28a4b --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/helm_plugin.py @@ -0,0 +1,242 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2020, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: helm_plugin +short_description: Manage Helm plugins +version_added: "1.0.0" +author: + - Abhijeet Kasurde (@Akasurde) +requirements: + - "helm (https://github.com/helm/helm/releases)" +description: + - Manages Helm plugins. +options: + release_namespace: + description: + - Kubernetes namespace where the helm plugin should be installed. + required: true + type: str + aliases: [ namespace ] + +#Helm options + state: + description: + - If C(state=present) the Helm plugin will be installed. + - If C(state=absent) the Helm plugin will be removed. + choices: [ absent, present ] + default: present + type: str + plugin_name: + description: + - Name of Helm plugin. + - Required only if C(state=absent). 
+ type: str + plugin_path: + description: + - Plugin path to a plugin on your local file system or a url of a remote VCS repo. + - If plugin path from file system is provided, make sure that tar is present on remote + machine and not on Ansible controller. + - Required only if C(state=present). + type: str +extends_documentation_fragment: + - community.kubernetes.helm_common_options +''' + +EXAMPLES = r''' +- name: Install Helm env plugin + community.kubernetes.helm_plugin: + plugin_path: https://github.com/adamreese/helm-env + state: present + +- name: Install Helm plugin from local filesystem + community.kubernetes.helm_plugin: + plugin_path: https://domain/path/to/plugin.tar.gz + state: present + +- name: Remove Helm env plugin + community.kubernetes.helm_plugin: + plugin_name: env + state: absent +''' + +RETURN = r''' +stdout: + type: str + description: Full `helm` command stdout, in case you want to display it or examine the event log + returned: always + sample: '' +stderr: + type: str + description: Full `helm` command stderr, in case you want to display it or examine the event log + returned: always + sample: '' +command: + type: str + description: Full `helm` command built by this module, in case you want to re-run the command outside the module or debug a problem. + returned: always + sample: helm plugin list ... 
+msg: + type: str + description: Info about successful command + returned: always + sample: "Plugin installed successfully" +rc: + type: int + description: Helm plugin command return code + returned: always + sample: 1 +''' + +from ansible.module_utils.basic import AnsibleModule, env_fallback + + +def main(): + module = AnsibleModule( + argument_spec=dict( + binary_path=dict(type='path'), + release_namespace=dict(type='str', required=True, aliases=['namespace']), + state=dict(type='str', default='present', choices=['present', 'absent']), + plugin_path=dict(type='str',), + plugin_name=dict(type='str',), + # Helm options + context=dict(type='str', aliases=['kube_context'], fallback=(env_fallback, ['K8S_AUTH_CONTEXT'])), + kubeconfig=dict(type='path', aliases=['kubeconfig_path'], fallback=(env_fallback, ['K8S_AUTH_KUBECONFIG'])), + ), + supports_check_mode=True, + required_if=[ + ("state", "present", ("plugin_path",)), + ("state", "absent", ("plugin_name",)), + ], + mutually_exclusive=[ + ['plugin_name', 'plugin_path'], + ], + ) + + bin_path = module.params.get('binary_path') + release_namespace = module.params.get('release_namespace') + state = module.params.get('state') + + # Helm options + kube_context = module.params.get('context') + kubeconfig_path = module.params.get('kubeconfig') + + if bin_path is not None: + helm_cmd_common = bin_path + else: + helm_cmd_common = 'helm' + + helm_cmd_common = module.get_bin_path(helm_cmd_common, required=True) + + helm_cmd_common += " plugin" + + if kube_context is not None: + helm_cmd_common += " --kube-context " + kube_context + + if kubeconfig_path is not None: + helm_cmd_common += " --kubeconfig " + kubeconfig_path + + helm_cmd_common += " --namespace=" + release_namespace + + if state == 'present': + helm_cmd_common += " install %s" % module.params.get('plugin_path') + if not module.check_mode: + rc, out, err = module.run_command(helm_cmd_common) + else: + rc, out, err = (0, '', '') + + if rc == 1 and 'plugin already 
exists' in err: + module.exit_json( + failed=False, + changed=False, + msg="Plugin already exists", + command=helm_cmd_common, + stdout=out, + stderr=err, + rc=rc + ) + elif rc == 0: + module.exit_json( + failed=False, + changed=True, + msg="Plugin installed successfully", + command=helm_cmd_common, + stdout=out, + stderr=err, + rc=rc, + ) + else: + module.fail_json( + msg="Failure when executing Helm command.", + command=helm_cmd_common, + stdout=out, + stderr=err, + rc=rc, + ) + elif state == 'absent': + plugin_name = module.params.get('plugin_name') + helm_plugin_list = helm_cmd_common + " list" + rc, out, err = module.run_command(helm_plugin_list) + if rc != 0 or (out == '' and err == ''): + module.fail_json( + msg="Failed to get Helm plugin info", + command=helm_plugin_list, + stdout=out, + stderr=err, + rc=rc, + ) + + if out: + found = False + for line in out.splitlines(): + if line.startswith("NAME"): + continue + name, dummy, dummy = line.split('\t', 3) + name = name.strip() + if name == plugin_name: + found = True + break + if found: + helm_uninstall_cmd = "%s uninstall %s" % (helm_cmd_common, plugin_name) + if not module.check_mode: + rc, out, err = module.run_command(helm_uninstall_cmd) + else: + rc, out, err = (0, '', '') + + if rc == 0: + module.exit_json( + changed=True, + msg="Plugin uninstalled successfully", + command=helm_uninstall_cmd, + stdout=out, + stderr=err, + rc=rc + ) + module.fail_json( + msg="Failed to get Helm plugin uninstall", + command=helm_uninstall_cmd, + stdout=out, + stderr=err, + rc=rc, + ) + else: + module.exit_json( + failed=False, + changed=False, + msg="Plugin not found or is already uninstalled", + command=helm_plugin_list, + stdout=out, + stderr=err, + rc=rc + ) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/helm_plugin_info.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/helm_plugin_info.py new 
file mode 100644 index 00000000..26664b43 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/helm_plugin_info.py @@ -0,0 +1,167 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2020, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: helm_plugin_info +short_description: Gather information about Helm plugins +version_added: "1.0.0" +author: + - Abhijeet Kasurde (@Akasurde) +requirements: + - "helm (https://github.com/helm/helm/releases)" +description: + - Gather information about Helm plugins installed in namespace. +options: + release_namespace: + description: + - Kubernetes namespace where the helm plugins are installed. + required: true + type: str + aliases: [ namespace ] + +#Helm options + plugin_name: + description: + - Name of Helm plugin, to gather particular plugin info. + type: str +extends_documentation_fragment: + - community.kubernetes.helm_common_options +''' + +EXAMPLES = r''' +- name: Gather Helm plugin info + community.kubernetes.helm_plugin_info: + +- name: Gather Helm env plugin info + community.kubernetes.helm_plugin_info: + plugin_name: env +''' + +RETURN = r''' +stdout: + type: str + description: Full `helm` command stdout, in case you want to display it or examine the event log + returned: always + sample: '' +stderr: + type: str + description: Full `helm` command stderr, in case you want to display it or examine the event log + returned: always + sample: '' +command: + type: str + description: Full `helm` command built by this module, in case you want to re-run the command outside the module or debug a problem. + returned: always + sample: helm plugin list ... 
+plugin_list: + type: list + description: Helm plugin dict inside a list + returned: always + sample: { + "name": "env", + "version": "0.1.0", + "description": "Print out the helm environment." + } +rc: + type: int + description: Helm plugin command return code + returned: always + sample: 1 +''' + +from ansible.module_utils.basic import AnsibleModule, env_fallback + + +def main(): + module = AnsibleModule( + argument_spec=dict( + binary_path=dict(type='path'), + release_namespace=dict(type='str', required=True, aliases=['namespace']), + plugin_name=dict(type='str',), + # Helm options + context=dict(type='str', aliases=['kube_context'], fallback=(env_fallback, ['K8S_AUTH_CONTEXT'])), + kubeconfig=dict(type='path', aliases=['kubeconfig_path'], fallback=(env_fallback, ['K8S_AUTH_KUBECONFIG'])), + ), + supports_check_mode=True, + ) + + bin_path = module.params.get('binary_path') + release_namespace = module.params.get('release_namespace') + + # Helm options + kube_context = module.params.get('context') + kubeconfig_path = module.params.get('kubeconfig') + + if bin_path is not None: + helm_cmd_common = bin_path + else: + helm_cmd_common = 'helm' + + helm_cmd_common = module.get_bin_path(helm_cmd_common, required=True) + + helm_cmd_common += " plugin" + + if kube_context is not None: + helm_cmd_common += " --kube-context " + kube_context + + if kubeconfig_path is not None: + helm_cmd_common += " --kubeconfig " + kubeconfig_path + + helm_cmd_common += " --namespace=" + release_namespace + + plugin_name = module.params.get('plugin_name') + helm_plugin_list = helm_cmd_common + " list" + rc, out, err = module.run_command(helm_plugin_list) + if rc != 0 or (out == '' and err == ''): + module.fail_json( + msg="Failed to get Helm plugin info", + command=helm_plugin_list, + stdout=out, + stderr=err, + rc=rc, + ) + + plugin_list = [] + if out: + for line in out.splitlines(): + if line.startswith("NAME"): + continue + name, version, description = line.split('\t', 3) + name = 
name.strip() + version = version.strip() + description = description.strip() + if plugin_name is None: + plugin_list.append({ + 'name': name, + 'version': version, + 'description': description, + }) + continue + + if plugin_name == name: + plugin_list.append({ + 'name': name, + 'version': version, + 'description': description, + }) + break + + module.exit_json( + changed=True, + command=helm_plugin_list, + stdout=out, + stderr=err, + rc=rc, + plugin_list=plugin_list, + ) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/helm_repository.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/helm_repository.py new file mode 100644 index 00000000..d8722e63 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/helm_repository.py @@ -0,0 +1,212 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2020, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: helm_repository + +short_description: Manage Helm repositories. + +version_added: "0.11.0" + +author: + - Lucas Boisserie (@LucasBoisserie) + +requirements: + - "helm (https://github.com/helm/helm/releases)" + - "yaml (https://pypi.org/project/PyYAML/)" + +description: + - Manage Helm repositories. + +options: + binary_path: + description: + - The path of a helm binary to use. + required: false + type: path + repo_name: + description: + - Chart repository name. + required: true + type: str + aliases: [ name ] + repo_url: + description: + - Chart repository url + type: str + aliases: [ url ] + repo_username: + description: + - Chart repository username for repository with basic auth. + - Required if chart_repo_password is specified. 
+ required: false + type: str + aliases: [ username ] + repo_password: + description: + - Chart repository password for repository with basic auth. + - Required if chart_repo_username is specified. + required: false + type: str + aliases: [ password ] + repo_state: + choices: ['present', 'absent'] + description: + - Desired state of repository. + required: false + default: present + aliases: [ state ] + type: str +''' + +EXAMPLES = r''' +- name: Add default repository + community.kubernetes.helm_repository: + name: stable + repo_url: https://kubernetes-charts.storage.googleapis.com + +- name: Add Red Hat Helm charts repository + community.kubernetes.helm_repository: + name: redhat-charts + repo_url: https://redhat-developer.github.com/redhat-helm-charts +''' + +RETURN = r''' # ''' + +import traceback + +try: + import yaml + IMP_YAML = True +except ImportError: + IMP_YAML_ERR = traceback.format_exc() + IMP_YAML = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + +module = None + + +# Get repository from all repositories added +def get_repository(state, repo_name): + if state is not None: + for repository in state: + if repository['name'] == repo_name: + return repository + return None + + +# Get repository status +def get_repository_status(command, repository_name): + list_command = command + " repo list --output=yaml" + + rc, out, err = module.run_command(list_command) + + # no repo => rc=1 and 'no repositories to show' in output + if rc == 1 and "no repositories to show" in err: + return None + elif rc != 0: + module.fail_json( + msg="Failure when executing Helm command. 
Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, out, err), + command=list_command + ) + + return get_repository(yaml.safe_load(out), repository_name) + + +# Install repository +def install_repository(command, repository_name, repository_url, repository_username, repository_password): + install_command = command + " repo add " + repository_name + " " + repository_url + + if repository_username is not None and repository_password is not None: + install_command += " --username=" + repository_username + install_command += " --password=" + repository_password + + return install_command + + +# Delete repository +def delete_repository(command, repository_name): + remove_command = command + " repo rm " + repository_name + + return remove_command + + +def main(): + global module + + module = AnsibleModule( + argument_spec=dict( + binary_path=dict(type='path'), + repo_name=dict(type='str', aliases=['name'], required=True), + repo_url=dict(type='str', aliases=['url']), + repo_username=dict(type='str', aliases=['username']), + repo_password=dict(type='str', aliases=['password'], no_log=True), + repo_state=dict(default='present', choices=['present', 'absent'], aliases=['state']), + ), + required_together=[ + ['repo_username', 'repo_password'] + ], + required_if=[ + ('repo_state', 'present', ['repo_url']), + ], + supports_check_mode=True, + ) + + if not IMP_YAML: + module.fail_json(msg=missing_required_lib("yaml"), exception=IMP_YAML_ERR) + + changed = False + + bin_path = module.params.get('binary_path') + repo_name = module.params.get('repo_name') + repo_url = module.params.get('repo_url') + repo_username = module.params.get('repo_username') + repo_password = module.params.get('repo_password') + repo_state = module.params.get('repo_state') + + if bin_path is not None: + helm_cmd = bin_path + else: + helm_cmd = module.get_bin_path('helm', required=True) + + repository_status = get_repository_status(helm_cmd, repo_name) + + if repo_state == "absent" and repository_status is 
not None: + helm_cmd = delete_repository(helm_cmd, repo_name) + changed = True + elif repo_state == "present": + if repository_status is None: + helm_cmd = install_repository(helm_cmd, repo_name, repo_url, repo_username, repo_password) + changed = True + elif repository_status['url'] != repo_url: + module.fail_json(msg="Repository already have a repository named {0}".format(repo_name)) + + if module.check_mode: + module.exit_json(changed=changed) + elif not changed: + module.exit_json(changed=False, repo_name=repo_name, repo_url=repo_url) + + rc, out, err = module.run_command(helm_cmd) + + if repo_password is not None: + helm_cmd = helm_cmd.replace(repo_password, '******') + + if rc != 0: + module.fail_json( + msg="Failure when executing Helm command. Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, out, err), + command=helm_cmd + ) + + module.exit_json(changed=changed, stdout=out, stderr=err, command=helm_cmd) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s.py new file mode 100644 index 00000000..18e33dfe --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s.py @@ -0,0 +1,320 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2018, Chris Houseknecht <@chouseknecht> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r''' + +module: k8s + +short_description: Manage Kubernetes (K8s) objects + +author: + - "Chris Houseknecht (@chouseknecht)" + - "Fabian von Feilitzsch (@fabianvf)" + +description: + - Use the OpenShift Python client to perform CRUD operations on K8s objects. + - Pass the object definition from a source file or inline. 
See examples for reading + files and using Jinja templates or vault-encrypted files. + - Access to the full range of K8s APIs. + - Use the M(community.kubernetes.k8s_info) module to obtain a list of items about an object of type C(kind) + - Authenticate using either a config file, certificates, password or token. + - Supports check mode. + +extends_documentation_fragment: + - community.kubernetes.k8s_state_options + - community.kubernetes.k8s_name_options + - community.kubernetes.k8s_resource_options + - community.kubernetes.k8s_auth_options + - community.kubernetes.k8s_wait_options + +notes: + - If your OpenShift Python library is not 0.9.0 or newer and you are trying to + remove an item from an associative array/dictionary, for example a label or + an annotation, you will need to explicitly set the value of the item to be + removed to `null`. Simply deleting the entry in the dictionary will not + remove it from openshift or kubernetes. + +options: + merge_type: + description: + - Whether to override the default patch merge approach with a specific type. By default, the strategic + merge will typically be used. + - For example, Custom Resource Definitions typically aren't updatable by the usual strategic merge. You may + want to use C(merge) if you see "strategic merge patch format is not supported" + - See U(https://kubernetes.io/docs/tasks/run-application/update-api-object-kubectl-patch/#use-a-json-merge-patch-to-update-a-deployment) + - Requires openshift >= 0.6.2 + - If more than one merge_type is given, the merge_types will be tried in order + - If openshift >= 0.6.2, this defaults to C(['strategic-merge', 'merge']), which is ideal for using the same parameters + on resource kinds that combine Custom Resources and built-in resources. For openshift < 0.6.2, the default + is simply C(strategic-merge). 
+ - mutually exclusive with C(apply) + choices: + - json + - merge + - strategic-merge + type: list + elements: str + validate: + description: + - how (if at all) to validate the resource definition against the kubernetes schema. + Requires the kubernetes-validate python module and openshift >= 0.8.0 + suboptions: + fail_on_error: + description: whether to fail on validation errors. + type: bool + version: + description: version of Kubernetes to validate against. defaults to Kubernetes server version + type: str + strict: + description: whether to fail when passing unexpected properties + default: True + type: bool + type: dict + append_hash: + description: + - Whether to append a hash to a resource name for immutability purposes + - Applies only to ConfigMap and Secret resources + - The parameter will be silently ignored for other resource kinds + - The full definition of an object is needed to generate the hash - this means that deleting an object created with append_hash + will only work if the same object is passed with state=absent (alternatively, just use state=absent with the name including + the generated hash and append_hash=no) + - Requires openshift >= 0.7.2 + type: bool + apply: + description: + - C(apply) compares the desired resource definition with the previously supplied resource definition, + ignoring properties that are automatically generated + - C(apply) works better with Services than 'force=yes' + - Requires openshift >= 0.9.2 + - mutually exclusive with C(merge_type) + type: bool + template: + description: + - Provide a valid YAML template definition file for an object when creating or updating. + - Value can be provided as string or dictionary. + - Mutually exclusive with C(src) and C(resource_definition). + - Template files needs to be present on the Ansible Controller's file system. + - Additional parameters can be specified using dictionary. 
+ - 'Valid additional parameters - ' + - 'C(newline_sequence) (str): Specify the newline sequence to use for templating files. + valid choices are "\n", "\r", "\r\n". Default value "\n".' + - 'C(block_start_string) (str): The string marking the beginning of a block. + Default value "{%".' + - 'C(block_end_string) (str): The string marking the end of a block. + Default value "%}".' + - 'C(variable_start_string) (str): The string marking the beginning of a print statement. + Default value "{{".' + - 'C(variable_end_string) (str): The string marking the end of a print statement. + Default value "}}".' + - 'C(trim_blocks) (bool): Determine when newlines should be removed from blocks. When set to C(yes) the first newline + after a block is removed (block, not variable tag!). Default value is true.' + - 'C(lstrip_blocks) (bool): Determine when leading spaces and tabs should be stripped. + When set to C(yes) leading spaces and tabs are stripped from the start of a line to a block. + This functionality requires Jinja 2.7 or newer. Default value is false.' 
+ type: raw + +requirements: + - "python >= 2.7" + - "openshift >= 0.6" + - "PyYAML >= 3.11" +''' + +EXAMPLES = r''' +- name: Create a k8s namespace + community.kubernetes.k8s: + name: testing + api_version: v1 + kind: Namespace + state: present + +- name: Create a Service object from an inline definition + community.kubernetes.k8s: + state: present + definition: + apiVersion: v1 + kind: Service + metadata: + name: web + namespace: testing + labels: + app: galaxy + service: web + spec: + selector: + app: galaxy + service: web + ports: + - protocol: TCP + targetPort: 8000 + name: port-8000-tcp + port: 8000 + +- name: Remove an existing Service object + community.kubernetes.k8s: + state: absent + api_version: v1 + kind: Service + namespace: testing + name: web + +# Passing the object definition from a file + +- name: Create a Deployment by reading the definition from a local file + community.kubernetes.k8s: + state: present + src: /testing/deployment.yml + +- name: >- + Read definition file from the Ansible controller file system. + If the definition file has been encrypted with Ansible Vault it will automatically be decrypted. 
+ community.kubernetes.k8s: + state: present + definition: "{{ lookup('file', '/testing/deployment.yml') | from_yaml }}" + +- name: Read definition template file from the Ansible controller file system + community.kubernetes.k8s: + state: present + template: '/testing/deployment.j2' + +- name: Read definition template file from the Ansible controller file system that uses custom start/end strings + community.kubernetes.k8s: + state: present + template: + path: '/testing/deployment.j2' + variable_start_string: '[[' + variable_end_string: ']]' + +- name: fail on validation errors + community.kubernetes.k8s: + state: present + definition: "{{ lookup('template', '/testing/deployment.yml') | from_yaml }}" + validate: + fail_on_error: yes + +- name: warn on validation errors, check for unexpected properties + community.kubernetes.k8s: + state: present + definition: "{{ lookup('template', '/testing/deployment.yml') | from_yaml }}" + validate: + fail_on_error: no + strict: yes +''' + +RETURN = r''' +result: + description: + - The created, patched, or otherwise present object. Will be empty in the case of a deletion. + returned: success + type: complex + contains: + api_version: + description: The versioned schema of this representation of an object. + returned: success + type: str + kind: + description: Represents the REST resource this object represents. + returned: success + type: str + metadata: + description: Standard object metadata. Includes name, namespace, annotations, labels, etc. + returned: success + type: complex + spec: + description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind). + returned: success + type: complex + status: + description: Current status details for the object. 
+ returned: success + type: complex + items: + description: Returned only when multiple yaml documents are passed to src or resource_definition + returned: when resource_definition or src contains list of objects + type: list + duration: + description: elapsed time of task in seconds + returned: when C(wait) is true + type: int + sample: 48 +''' + +import copy + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.kubernetes.plugins.module_utils.common import ( + K8sAnsibleMixin, COMMON_ARG_SPEC, NAME_ARG_SPEC, RESOURCE_ARG_SPEC, AUTH_ARG_SPEC, WAIT_ARG_SPEC) + + +class KubernetesModule(K8sAnsibleMixin): + + @property + def validate_spec(self): + return dict( + fail_on_error=dict(type='bool'), + version=dict(), + strict=dict(type='bool', default=True) + ) + + @property + def argspec(self): + argument_spec = copy.deepcopy(COMMON_ARG_SPEC) + argument_spec.update(copy.deepcopy(NAME_ARG_SPEC)) + argument_spec.update(copy.deepcopy(RESOURCE_ARG_SPEC)) + argument_spec.update(copy.deepcopy(AUTH_ARG_SPEC)) + argument_spec.update(copy.deepcopy(WAIT_ARG_SPEC)) + argument_spec['merge_type'] = dict(type='list', elements='str', choices=['json', 'merge', 'strategic-merge']) + argument_spec['validate'] = dict(type='dict', default=None, options=self.validate_spec) + argument_spec['append_hash'] = dict(type='bool', default=False) + argument_spec['apply'] = dict(type='bool', default=False) + argument_spec['template'] = dict(type='raw', default=None) + return argument_spec + + def __init__(self, k8s_kind=None, *args, **kwargs): + mutually_exclusive = [ + ('resource_definition', 'src'), + ('merge_type', 'apply'), + ('template', 'resource_definition'), + ('template', 'src'), + ] + + module = AnsibleModule( + argument_spec=self.argspec, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True, + ) + + self.module = module + self.check_mode = self.module.check_mode + self.params = self.module.params + self.fail_json = self.module.fail_json + 
self.fail = self.module.fail_json + self.exit_json = self.module.exit_json + + super(KubernetesModule, self).__init__(*args, **kwargs) + + self.client = None + self.warnings = [] + + self.kind = k8s_kind or self.params.get('kind') + self.api_version = self.params.get('api_version') + self.name = self.params.get('name') + self.namespace = self.params.get('namespace') + + self.check_library_version() + self.set_resource_definitions() + + +def main(): + KubernetesModule().execute_module() + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_auth.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_auth.py new file mode 100644 index 00000000..3af297ba --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_auth.py @@ -0,0 +1,336 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2018, KubeVirt Team <@kubevirt> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r''' + +module: k8s_auth + +short_description: Authenticate to Kubernetes clusters which require an explicit login step + +author: KubeVirt Team (@kubevirt) + +description: + - "This module handles authenticating to Kubernetes clusters requiring I(explicit) authentication procedures, + meaning ones where a client logs in (obtains an authentication token), performs API operations using said + token and then logs out (revokes the token). An example of a Kubernetes distribution requiring this module + is OpenShift." 
+ - "On the other hand a popular configuration for username+password authentication is one utilizing HTTP Basic + Auth, which does not involve any additional login/logout steps (instead login credentials can be attached + to each and every API call performed) and as such is handled directly by the C(k8s) module (and other + resource–specific modules) by utilizing the C(host), C(username) and C(password) parameters. Please + consult your preferred module's documentation for more details." + +options: + state: + description: + - If set to I(present) connect to the API server using the URL specified in C(host) and attempt to log in. + - If set to I(absent) attempt to log out by revoking the authentication token specified in C(api_key). + default: present + choices: + - present + - absent + type: str + host: + description: + - Provide a URL for accessing the API server. + required: true + type: str + username: + description: + - Provide a username for authenticating with the API server. + type: str + password: + description: + - Provide a password for authenticating with the API server. + type: str + ca_cert: + description: + - "Path to a CA certificate file used to verify connection to the API server. The full certificate chain + must be provided to avoid certificate validation errors." + aliases: [ ssl_ca_cert ] + type: path + validate_certs: + description: + - "Whether or not to verify the API server's SSL certificates." + type: bool + default: true + aliases: [ verify_ssl ] + api_key: + description: + - When C(state) is set to I(absent), this specifies the token to revoke. + type: str + +requirements: + - python >= 2.7 + - urllib3 + - requests + - requests-oauthlib +''' + +EXAMPLES = r''' +- hosts: localhost + module_defaults: + group/k8s: + host: https://k8s.example.com/ + ca_cert: ca.pem + tasks: + - block: + # It's good practice to store login credentials in a secure vault and not + # directly in playbooks. 
+ - include_vars: k8s_passwords.yml + + - name: Log in (obtain access token) + community.kubernetes.k8s_auth: + username: admin + password: "{{ k8s_admin_password }}" + register: k8s_auth_results + + # Previous task provides the token/api_key, while all other parameters + # are taken from module_defaults + - name: Get a list of all pods from any namespace + community.kubernetes.k8s_info: + api_key: "{{ k8s_auth_results.k8s_auth.api_key }}" + kind: Pod + register: pod_list + + always: + - name: If login succeeded, try to log out (revoke access token) + when: k8s_auth_results.k8s_auth.api_key is defined + community.kubernetes.k8s_auth: + state: absent + api_key: "{{ k8s_auth_results.k8s_auth.api_key }}" +''' + +# Returned value names need to match k8s modules parameter names, to make it +# easy to pass returned values of k8s_auth to other k8s modules. +# Discussion: https://github.com/ansible/ansible/pull/50807#discussion_r248827899 +RETURN = r''' +k8s_auth: + description: Kubernetes authentication facts. + returned: success + type: complex + contains: + api_key: + description: Authentication token. + returned: success + type: str + host: + description: URL for accessing the API server. + returned: success + type: str + ca_cert: + description: Path to a CA certificate file used to verify connection to the API server. + returned: success + type: str + validate_certs: + description: "Whether or not to verify the API server's SSL certificates." + returned: success + type: bool + username: + description: Username for authenticating with the API server. 
+ returned: success + type: str +''' + + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib_parse import urlparse, parse_qs, urlencode + +# 3rd party imports +try: + import requests + HAS_REQUESTS = True +except ImportError: + HAS_REQUESTS = False + +try: + from requests_oauthlib import OAuth2Session + HAS_REQUESTS_OAUTH = True +except ImportError: + HAS_REQUESTS_OAUTH = False + +try: + from urllib3.util import make_headers + HAS_URLLIB3 = True +except ImportError: + HAS_URLLIB3 = False + + +K8S_AUTH_ARG_SPEC = { + 'state': { + 'default': 'present', + 'choices': ['present', 'absent'], + }, + 'host': {'required': True}, + 'username': {}, + 'password': {'no_log': True}, + 'ca_cert': {'type': 'path', 'aliases': ['ssl_ca_cert']}, + 'validate_certs': { + 'type': 'bool', + 'default': True, + 'aliases': ['verify_ssl'] + }, + 'api_key': {'no_log': True}, +} + + +class KubernetesAuthModule(AnsibleModule): + def __init__(self): + AnsibleModule.__init__( + self, + argument_spec=K8S_AUTH_ARG_SPEC, + required_if=[ + ('state', 'present', ['username', 'password']), + ('state', 'absent', ['api_key']), + ] + ) + + if not HAS_REQUESTS: + self.fail("This module requires the python 'requests' package. Try `pip install requests`.") + + if not HAS_REQUESTS_OAUTH: + self.fail("This module requires the python 'requests-oauthlib' package. Try `pip install requests-oauthlib`.") + + if not HAS_URLLIB3: + self.fail("This module requires the python 'urllib3' package. 
Try `pip install urllib3`.") + + def execute_module(self): + state = self.params.get('state') + verify_ssl = self.params.get('validate_certs') + ssl_ca_cert = self.params.get('ca_cert') + + self.auth_username = self.params.get('username') + self.auth_password = self.params.get('password') + self.auth_api_key = self.params.get('api_key') + self.con_host = self.params.get('host') + + # python-requests takes either a bool or a path to a ca file as the 'verify' param + if verify_ssl and ssl_ca_cert: + self.con_verify_ca = ssl_ca_cert # path + else: + self.con_verify_ca = verify_ssl # bool + + # Get needed info to access authorization APIs + self.openshift_discover() + + if state == 'present': + new_api_key = self.openshift_login() + result = dict( + host=self.con_host, + validate_certs=verify_ssl, + ca_cert=ssl_ca_cert, + api_key=new_api_key, + username=self.auth_username, + ) + else: + self.openshift_logout() + result = dict() + + self.exit_json(changed=False, k8s_auth=result) + + def openshift_discover(self): + url = '{0}/.well-known/oauth-authorization-server'.format(self.con_host) + ret = requests.get(url, verify=self.con_verify_ca) + + if ret.status_code != 200: + self.fail_request("Couldn't find OpenShift's OAuth API", method='GET', url=url, + reason=ret.reason, status_code=ret.status_code) + + try: + oauth_info = ret.json() + + self.openshift_auth_endpoint = oauth_info['authorization_endpoint'] + self.openshift_token_endpoint = oauth_info['token_endpoint'] + except Exception: + self.fail_json(msg="Something went wrong discovering OpenShift OAuth details.", + exception=traceback.format_exc()) + + def openshift_login(self): + os_oauth = OAuth2Session(client_id='openshift-challenging-client') + authorization_url, state = os_oauth.authorization_url(self.openshift_auth_endpoint, + state="1", code_challenge_method='S256') + auth_headers = make_headers(basic_auth='{0}:{1}'.format(self.auth_username, self.auth_password)) + + # Request authorization code using basic auth 
credentials + ret = os_oauth.get( + authorization_url, + headers={'X-Csrf-Token': state, 'authorization': auth_headers.get('authorization')}, + verify=self.con_verify_ca, + allow_redirects=False + ) + + if ret.status_code != 302: + self.fail_request("Authorization failed.", method='GET', url=authorization_url, + reason=ret.reason, status_code=ret.status_code) + + # In here we have `code` and `state`, I think `code` is the important one + qwargs = {} + for k, v in parse_qs(urlparse(ret.headers['Location']).query).items(): + qwargs[k] = v[0] + qwargs['grant_type'] = 'authorization_code' + + # Using authorization code given to us in the Location header of the previous request, request a token + ret = os_oauth.post( + self.openshift_token_endpoint, + headers={ + 'Accept': 'application/json', + 'Content-Type': 'application/x-www-form-urlencoded', + # This is just base64 encoded 'openshift-challenging-client:' + 'Authorization': 'Basic b3BlbnNoaWZ0LWNoYWxsZW5naW5nLWNsaWVudDo=' + }, + data=urlencode(qwargs), + verify=self.con_verify_ca + ) + + if ret.status_code != 200: + self.fail_request("Failed to obtain an authorization token.", method='POST', + url=self.openshift_token_endpoint, + reason=ret.reason, status_code=ret.status_code) + + return ret.json()['access_token'] + + def openshift_logout(self): + url = '{0}/apis/oauth.openshift.io/v1/oauthaccesstokens/{1}'.format(self.con_host, self.auth_api_key) + headers = { + 'Accept': 'application/json', + 'Content-Type': 'application/json', + 'Authorization': 'Bearer {0}'.format(self.auth_api_key) + } + json = { + "apiVersion": "oauth.openshift.io/v1", + "kind": "DeleteOptions" + } + + requests.delete(url, headers=headers, json=json, verify=self.con_verify_ca) + # Ignore errors, the token will time out eventually anyway + + def fail(self, msg=None): + self.fail_json(msg=msg) + + def fail_request(self, msg, **kwargs): + req_info = {} + for k, v in kwargs.items(): + req_info['req_' + k] = v + self.fail_json(msg=msg, **req_info) 
+ + +def main(): + module = KubernetesAuthModule() + try: + module.execute_module() + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_cluster_info.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_cluster_info.py new file mode 100644 index 00000000..e01009d6 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_cluster_info.py @@ -0,0 +1,247 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2020, Abhijeet Kasurde +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +module: k8s_cluster_info + +version_added: "0.11.1" + +short_description: Describe Kubernetes (K8s) cluster, APIs available and their respective versions + +author: + - Abhijeet Kasurde (@Akasurde) + +description: + - Use the OpenShift Python client to perform read operations on K8s objects. + - Authenticate using either a config file, certificates, password or token. + - Supports check mode. + +options: + invalidate_cache: + description: + - Invalidate cache before retrieving information about cluster. 
+ type: bool + default: True + +extends_documentation_fragment: + - community.kubernetes.k8s_auth_options + +requirements: + - "python >= 2.7" + - "openshift >= 0.6" + - "PyYAML >= 3.11" +''' + +EXAMPLES = r''' +- name: Get Cluster information + community.kubernetes.k8s_cluster_info: + register: api_status + +- name: Do not invalidate cache before getting information + community.kubernetes.k8s_cluster_info: + invalidate_cache: False + register: api_status +''' + +RETURN = r''' +connection: + description: + - Connection information + returned: success + type: dict + contains: + cert_file: + description: + - Path to client certificate. + type: str + returned: success + host: + description: + - Host URL + type: str + returned: success + password: + description: + - User password + type: str + returned: success + proxy: + description: + - Proxy details + type: str + returned: success + ssl_ca_cert: + description: + - Path to CA certificate + type: str + returned: success + username: + description: + - Username + type: str + returned: success + verify_ssl: + description: + - SSL verification status + type: bool + returned: success +version: + description: + - Information about server and client version + returned: success + type: dict + contains: + server: + description: Server version + returned: success + type: dict + client: + description: Client version + returned: success + type: str +apis: + description: + - The API(s) that exists in dictionary + returned: success + type: dict + contains: + api_version: + description: API version + returned: success + type: str + categories: + description: API categories + returned: success + type: list + group_version: + description: Resource Group version + returned: success + type: str + kind: + description: Resource kind + returned: success + type: str + name: + description: Resource short name + returned: success + type: str + namespaced: + description: If resource is namespaced + returned: success + type: bool + preferred: + 
description: If resource version preferred + returned: success + type: bool + short_names: + description: Resource short names + returned: success + type: str + singular_name: + description: Resource singular name + returned: success + type: str + available_api_version: + description: All available versions of the given API + returned: success + type: list + preferred_api_version: + description: Preferred version of the given API + returned: success + type: str +''' + + +import copy +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.parsing.convert_bool import boolean +from ansible_collections.community.kubernetes.plugins.module_utils.common import K8sAnsibleMixin, AUTH_ARG_SPEC + +try: + try: + from openshift import __version__ as version + # >=0.10 + from openshift.dynamic.resource import ResourceList + except ImportError: + # <0.10 + from openshift.dynamic.client import ResourceList + HAS_K8S_INSTANCE_HELPER = True + k8s_import_exception = None +except ImportError: + HAS_K8S_INSTANCE_HELPER = False + k8s_import_exception = traceback.format_exc() + + +class KubernetesInfoModule(K8sAnsibleMixin): + + def __init__(self): + module = AnsibleModule( + argument_spec=self.argspec, + supports_check_mode=True, + ) + self.module = module + self.params = self.module.params + + if not HAS_K8S_INSTANCE_HELPER: + self.module.fail_json(msg=missing_required_lib("openshift >= 0.6.2", reason="for merge_type"), + exception=k8s_import_exception) + + super(KubernetesInfoModule, self).__init__() + + def execute_module(self): + self.client = self.get_api_client() + invalidate_cache = boolean(self.module.params.get('invalidate_cache', True), strict=False) + if invalidate_cache: + self.client.resources.invalidate_cache() + results = {} + for resource in list(self.client.resources): + resource = resource[0] + if isinstance(resource, ResourceList): + continue + results[resource.group] = { + 'api_version': 
resource.group_version, + 'categories': resource.categories if resource.categories else [], + 'kind': resource.kind, + 'name': resource.name, + 'namespaced': resource.namespaced, + 'preferred': resource.preferred, + 'short_names': resource.short_names if resource.short_names else [], + 'singular_name': resource.singular_name, + } + configuration = self.client.configuration + connection = { + 'cert_file': configuration.cert_file, + 'host': configuration.host, + 'password': configuration.password, + 'proxy': configuration.proxy, + 'ssl_ca_cert': configuration.ssl_ca_cert, + 'username': configuration.username, + 'verify_ssl': configuration.verify_ssl, + } + version_info = { + 'client': version, + 'server': self.client.version, + } + self.module.exit_json(changed=False, apis=results, connection=connection, version=version_info) + + @property + def argspec(self): + spec = copy.deepcopy(AUTH_ARG_SPEC) + spec['invalidate_cache'] = dict(type='bool', default=True) + return spec + + +def main(): + KubernetesInfoModule().execute_module() + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_exec.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_exec.py new file mode 100644 index 00000000..e540b9b6 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_exec.py @@ -0,0 +1,205 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2020, Red Hat +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r''' + +module: k8s_exec + +short_description: Execute command in Pod + +version_added: "0.10.0" + +author: "Tristan de Cacqueray (@tristanC)" + +description: + - Use the Kubernetes Python client to execute command on K8s pods. 
+ +extends_documentation_fragment: + - community.kubernetes.k8s_auth_options + +requirements: + - "python >= 2.7" + - "openshift == 0.4.3" + - "PyYAML >= 3.11" + +notes: +- Return code C(return_code) for the command executed is added in output in version 1.0.0. +options: + proxy: + description: + - The URL of an HTTP proxy to use for the connection. + - Can also be specified via I(K8S_AUTH_PROXY) environment variable. + - Please note that this module does not pick up typical proxy settings from the environment (e.g. HTTP_PROXY). + type: str + namespace: + description: + - The pod namespace name + type: str + required: yes + pod: + description: + - The pod name + type: str + required: yes + container: + description: + - The name of the container in the pod to connect to. + - Defaults to only container if there is only one container in the pod. + type: str + required: no + command: + description: + - The command to execute + type: str + required: yes +''' + +EXAMPLES = r''' +- name: Execute a command + community.kubernetes.k8s_exec: + namespace: myproject + pod: zuul-scheduler + command: zuul-scheduler full-reconfigure + +- name: Check RC status of command executed + community.kubernetes.k8s_exec: + namespace: myproject + pod: busybox-test + command: cmd_with_non_zero_exit_code + register: command_status + ignore_errors: True + +- name: Check last command status + debug: + msg: "cmd failed" + when: command_status.return_code != 0 +''' + +RETURN = r''' +result: + description: + - The command object + returned: success + type: complex + contains: + stdout: + description: The command stdout + type: str + stdout_lines: + description: The command stdout + type: str + stderr: + description: The command stderr + type: str + stderr_lines: + description: The command stderr + type: str + return_code: + description: The command status code + type: int +''' + +import copy +import shlex + +try: + import yaml +except ImportError: + # ImportError are managed by the common module 
already. + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +from ansible_collections.community.kubernetes.plugins.module_utils.common import ( + K8sAnsibleMixin, AUTH_ARG_SPEC +) + +try: + from kubernetes.client.apis import core_v1_api + from kubernetes.stream import stream +except ImportError: + # ImportError are managed by the common module already. + pass + + +class KubernetesExecCommand(K8sAnsibleMixin): + + def __init__(self): + module = AnsibleModule( + argument_spec=self.argspec, + supports_check_mode=True, + ) + self.module = module + self.params = self.module.params + self.fail_json = self.module.fail_json + super(KubernetesExecCommand, self).__init__() + + @property + def argspec(self): + spec = copy.deepcopy(AUTH_ARG_SPEC) + spec['namespace'] = dict(type='str', required=True) + spec['pod'] = dict(type='str', required=True) + spec['container'] = dict(type='str') + spec['command'] = dict(type='str', required=True) + return spec + + def execute_module(self): + # Load kubernetes.client.Configuration + self.get_api_client() + api = core_v1_api.CoreV1Api() + + # hack because passing the container as None breaks things + optional_kwargs = {} + if self.params.get('container'): + optional_kwargs['container'] = self.params['container'] + try: + resp = stream( + api.connect_get_namespaced_pod_exec, + self.params["pod"], + self.params["namespace"], + command=shlex.split(self.params["command"]), + stdout=True, + stderr=True, + stdin=False, + tty=False, + _preload_content=False, **optional_kwargs) + except Exception as e: + self.module.fail_json(msg="Failed to execute on pod %s" + " due to : %s" % (self.params.get('pod'), to_native(e))) + stdout, stderr, rc = [], [], 0 + while resp.is_open(): + resp.update(timeout=1) + if resp.peek_stdout(): + stdout.append(resp.read_stdout()) + if resp.peek_stderr(): + stderr.append(resp.read_stderr()) + err = resp.read_channel(3) + err = yaml.safe_load(err) + if 
err['status'] == 'Success': + rc = 0 + else: + rc = int(err['details']['causes'][0]['message']) + + self.module.exit_json( + # Some command might change environment, but ultimately failing at end + changed=True, + stdout="".join(stdout), + stderr="".join(stderr), + return_code=rc + ) + + +def main(): + KubernetesExecCommand().execute_module() + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_info.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_info.py new file mode 100644 index 00000000..f7a7a0ca --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_info.py @@ -0,0 +1,200 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2018, Will Thames <@willthames> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r''' +module: k8s_info + +short_description: Describe Kubernetes (K8s) objects + +author: + - "Will Thames (@willthames)" + +description: + - Use the OpenShift Python client to perform read operations on K8s objects. + - Access to the full range of K8s APIs. + - Authenticate using either a config file, certificates, password or token. + - Supports check mode. + - This module was called C(k8s_facts) before Ansible 2.9. The usage did not change. + +options: + kind: + description: + - Use to specify an object model. + - Use to create, delete, or discover an object without providing a full resource definition. + - Use in conjunction with I(api_version), I(name), and I(namespace) to identify a specific object. + - If I(resource definition) is provided, the I(kind) value from the I(resource_definition) + will override this option. 
+ type: str + required: True + label_selectors: + description: List of label selectors to use to filter results + type: list + elements: str + field_selectors: + description: List of field selectors to use to filter results + type: list + elements: str + +extends_documentation_fragment: + - community.kubernetes.k8s_auth_options + - community.kubernetes.k8s_name_options + - community.kubernetes.k8s_wait_options + +requirements: + - "python >= 2.7" + - "openshift >= 0.6" + - "PyYAML >= 3.11" +''' + +EXAMPLES = r''' +- name: Get an existing Service object + community.kubernetes.k8s_info: + api_version: v1 + kind: Service + name: web + namespace: testing + register: web_service + +- name: Get a list of all service objects + community.kubernetes.k8s_info: + api_version: v1 + kind: Service + namespace: testing + register: service_list + +- name: Get a list of all pods from any namespace + community.kubernetes.k8s_info: + kind: Pod + register: pod_list + +- name: Search for all Pods labelled app=web + community.kubernetes.k8s_info: + kind: Pod + label_selectors: + - app = web + - tier in (dev, test) + +- name: Using vars while using label_selectors + community.kubernetes.k8s_info: + kind: Pod + label_selectors: + - "app = {{ app_label_web }}" + vars: + app_label_web: web + +- name: Search for all running pods + community.kubernetes.k8s_info: + kind: Pod + field_selectors: + - status.phase=Running + +- name: List custom objects created using CRD + community.kubernetes.k8s_info: + kind: MyCustomObject + api_version: "stable.example.com/v1" + +- name: Wait till the Object is created + community.kubernetes.k8s_info: + kind: Pod + wait: yes + name: pod-not-yet-created + namespace: default + wait_sleep: 10 + wait_timeout: 360 +''' + +RETURN = r''' +resources: + description: + - The object(s) that exists + returned: success + type: complex + contains: + api_version: + description: The versioned schema of this representation of an object. 
+ returned: success + type: str + kind: + description: Represents the REST resource this object represents. + returned: success + type: str + metadata: + description: Standard object metadata. Includes name, namespace, annotations, labels, etc. + returned: success + type: dict + spec: + description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind). + returned: success + type: dict + status: + description: Current status details for the object. + returned: success + type: dict +''' + +import copy + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.kubernetes.plugins.module_utils.common import ( + K8sAnsibleMixin, AUTH_ARG_SPEC, WAIT_ARG_SPEC) + + +class KubernetesInfoModule(K8sAnsibleMixin): + + def __init__(self, *args, **kwargs): + module = AnsibleModule( + argument_spec=self.argspec, + supports_check_mode=True, + ) + self.module = module + self.params = self.module.params + self.fail_json = self.module.fail_json + self.exit_json = self.module.exit_json + super(KubernetesInfoModule, self).__init__() + + def execute_module(self): + self.client = self.get_api_client() + + self.exit_json(changed=False, + **self.kubernetes_facts(self.params['kind'], + self.params['api_version'], + name=self.params['name'], + namespace=self.params['namespace'], + label_selectors=self.params['label_selectors'], + field_selectors=self.params['field_selectors'], + wait=self.params['wait'], + wait_sleep=self.params['wait_sleep'], + wait_timeout=self.params['wait_timeout'], + condition=self.params['wait_condition'])) + + @property + def argspec(self): + args = copy.deepcopy(AUTH_ARG_SPEC) + args.update(WAIT_ARG_SPEC) + args.update( + dict( + kind=dict(required=True), + api_version=dict(default='v1', aliases=['api', 'version']), + name=dict(), + namespace=dict(), + label_selectors=dict(type='list', elements='str', default=[]), + field_selectors=dict(type='list', elements='str', default=[]), + ) + ) + return args + + 
+def main(): + KubernetesInfoModule().execute_module() + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_log.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_log.py new file mode 100644 index 00000000..e7b75711 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_log.py @@ -0,0 +1,236 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2019, Fabian von Feilitzsch <@fabianvf> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r''' +module: k8s_log + +short_description: Fetch logs from Kubernetes resources + +version_added: "0.10.0" + +author: + - "Fabian von Feilitzsch (@fabianvf)" + +description: + - Use the OpenShift Python client to perform read operations on K8s log endpoints. + - Authenticate using either a config file, certificates, password or token. + - Supports check mode. + - Analogous to `kubectl logs` or `oc logs` +extends_documentation_fragment: + - community.kubernetes.k8s_auth_options + - community.kubernetes.k8s_name_options +options: + kind: + description: + - Use to specify an object model. + - Use in conjunction with I(api_version), I(name), and I(namespace) to identify a specific object. + - If using I(label_selectors), cannot be overridden. + type: str + default: Pod + name: + description: + - Use to specify an object name. + - Use in conjunction with I(api_version), I(kind) and I(namespace) to identify a specific object. + - Only one of I(name) or I(label_selectors) may be provided. + type: str + label_selectors: + description: + - List of label selectors to use to filter results + - Only one of I(name) or I(label_selectors) may be provided. 
+ type: list + elements: str + container: + description: + - Use to specify the container within a pod to grab the log from. + - If there is only one container, this will default to that container. + - If there is more than one container, this option is required. + required: no + type: str + +requirements: + - "python >= 2.7" + - "openshift >= 0.6" + - "PyYAML >= 3.11" +''' + +EXAMPLES = r''' +- name: Get a log from a Pod + community.kubernetes.k8s_log: + name: example-1 + namespace: testing + register: log + +# This will get the log from the first Pod found matching the selector +- name: Log a Pod matching a label selector + community.kubernetes.k8s_log: + namespace: testing + label_selectors: + - app=example + register: log + +# This will get the log from a single Pod managed by this Deployment +- name: Get a log from a Deployment + community.kubernetes.k8s_log: + api_version: apps/v1 + kind: Deployment + namespace: testing + name: example + register: log + +# This will get the log from a single Pod managed by this DeploymentConfig +- name: Get a log from a DeploymentConfig + community.kubernetes.k8s_log: + api_version: apps.openshift.io/v1 + kind: DeploymentConfig + namespace: testing + name: example + register: log +''' + +RETURN = r''' +log: + type: str + description: + - The text log of the object + returned: success +log_lines: + type: list + description: + - The log of the object, split on newlines + returned: success +''' + + +import copy + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import PY2 + +from ansible_collections.community.kubernetes.plugins.module_utils.common import ( + K8sAnsibleMixin, AUTH_ARG_SPEC, NAME_ARG_SPEC) + + +class KubernetesLogModule(K8sAnsibleMixin): + + def __init__(self): + module = AnsibleModule( + argument_spec=self.argspec, + supports_check_mode=True, + ) + self.module = module + self.params = self.module.params + self.fail_json = self.module.fail_json + self.fail = 
self.module.fail_json + self.exit_json = self.module.exit_json + super(KubernetesLogModule, self).__init__() + + @property + def argspec(self): + args = copy.deepcopy(AUTH_ARG_SPEC) + args.update(NAME_ARG_SPEC) + args.update( + dict( + kind=dict(type='str', default='Pod'), + container=dict(), + label_selectors=dict(type='list', elements='str', default=[]), + ) + ) + return args + + def execute_module(self): + name = self.params.get('name') + namespace = self.params.get('namespace') + label_selector = ','.join(self.params.get('label_selectors', {})) + if name and label_selector: + self.fail(msg='Only one of name or label_selectors can be provided') + + self.client = self.get_api_client() + resource = self.find_resource(self.params['kind'], self.params['api_version'], fail=True) + v1_pods = self.find_resource('Pod', 'v1', fail=True) + + if 'log' not in resource.subresources: + if not name: + self.fail(msg='name must be provided for resources that do not support the log subresource') + instance = resource.get(name=name, namespace=namespace) + label_selector = ','.join(self.extract_selectors(instance)) + resource = v1_pods + + if label_selector: + instances = v1_pods.get(namespace=namespace, label_selector=label_selector) + if not instances.items: + self.fail(msg='No pods in namespace {0} matched selector {1}'.format(namespace, label_selector)) + # This matches the behavior of kubectl when logging pods via a selector + name = instances.items[0].metadata.name + resource = v1_pods + + kwargs = {} + if self.params.get('container'): + kwargs['query_params'] = dict(container=self.params['container']) + + log = serialize_log(resource.log.get( + name=name, + namespace=namespace, + serialize=False, + **kwargs + )) + + self.exit_json(changed=False, log=log, log_lines=log.split('\n')) + + def extract_selectors(self, instance): + # Parses selectors on an object based on the specifications documented here: + # 
https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + selectors = [] + if not instance.spec.selector: + self.fail(msg='{0} {1} does not support the log subresource directly, and no Pod selector was found on the object'.format( + '/'.join(instance.group, instance.apiVersion), instance.kind)) + + if not (instance.spec.selector.matchLabels or instance.spec.selector.matchExpressions): + # A few resources (like DeploymentConfigs) just use a simple key:value style instead of supporting expressions + for k, v in dict(instance.spec.selector).items(): + selectors.append('{0}={1}'.format(k, v)) + return selectors + + if instance.spec.selector.matchLabels: + for k, v in dict(instance.spec.selector.matchLabels).items(): + selectors.append('{0}={1}'.format(k, v)) + + if instance.spec.selector.matchExpressions: + for expression in instance.spec.selector.matchExpressions: + operator = expression.operator + + if operator == 'Exists': + selectors.append(expression.key) + elif operator == 'DoesNotExist': + selectors.append('!{0}'.format(expression.key)) + elif operator in ['In', 'NotIn']: + selectors.append('{key} {operator} {values}'.format( + key=expression.key, + operator=operator.lower(), + values='({0})'.format(', '.join(expression.values)) + )) + else: + self.fail(msg='The k8s_log module does not support the {0} matchExpression operator'.format(operator.lower())) + + return selectors + + +def serialize_log(response): + if PY2: + return response.data + return response.data.decode('utf8') + + +def main(): + KubernetesLogModule().execute_module() + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_rollback.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_rollback.py new file mode 100644 index 00000000..7ccd4153 --- /dev/null +++ 
b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_rollback.py @@ -0,0 +1,224 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2020, Julien Huon <@julienhuon> Institut National de l'Audiovisuel +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +module: k8s_rollback +short_description: Rollback Kubernetes (K8S) Deployments and DaemonSets +version_added: "1.0.0" +author: + - "Julien Huon (@julienhuon)" +description: + - Use the OpenShift Python client to perform the Rollback. + - Authenticate using either a config file, certificates, password or token. + - Similar to the C(kubectl rollout undo) command. +options: + label_selectors: + description: List of label selectors to use to filter results. + type: list + elements: str + field_selectors: + description: List of field selectors to use to filter results. + type: list + elements: str +extends_documentation_fragment: + - community.kubernetes.k8s_auth_options + - community.kubernetes.k8s_name_options +requirements: + - "python >= 2.7" + - "openshift >= 0.6" + - "PyYAML >= 3.11" +''' + +EXAMPLES = r''' +- name: Rollback a failed deployment + community.kubernetes.k8s_rollback: + api_version: apps/v1 + kind: Deployment + name: web + namespace: testing +''' + +RETURN = r''' +rollback_info: + description: + - The object that was rolled back. + returned: success + type: complex + contains: + api_version: + description: The versioned schema of this representation of an object. + returned: success + type: str + code: + description: The HTTP Code of the response + returned: success + type: str + kind: + description: Status + returned: success + type: str + metadata: + description: + - Standard object metadata. + - Includes name, namespace, annotations, labels, etc. 
+ returned: success + type: dict + status: + description: Current status details for the object. + returned: success + type: dict +''' + +import copy + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.kubernetes.plugins.module_utils.common import ( + K8sAnsibleMixin, AUTH_ARG_SPEC, NAME_ARG_SPEC) + + +class KubernetesRollbackModule(K8sAnsibleMixin): + + def __init__(self): + module = AnsibleModule( + argument_spec=self.argspec, + supports_check_mode=True, + ) + self.module = module + self.params = self.module.params + self.fail_json = self.module.fail_json + self.fail = self.module.fail_json + self.exit_json = self.module.exit_json + super(KubernetesRollbackModule, self).__init__() + + self.kind = self.params['kind'] + self.api_version = self.params['api_version'] + self.name = self.params['name'] + self.namespace = self.params['namespace'] + self.managed_resource = {} + + if self.kind == "DaemonSet": + self.managed_resource['kind'] = "ControllerRevision" + self.managed_resource['api_version'] = "apps/v1" + elif self.kind == "Deployment": + self.managed_resource['kind'] = "ReplicaSet" + self.managed_resource['api_version'] = "apps/v1" + else: + self.fail(msg="Cannot perform rollback on resource of kind {0}".format(self.kind)) + + def execute_module(self): + results = [] + self.client = self.get_api_client() + + resources = self.kubernetes_facts(self.kind, + self.api_version, + self.name, + self.namespace, + self.params['label_selectors'], + self.params['field_selectors']) + + for resource in resources['resources']: + result = self.perform_action(resource) + results.append(result) + + self.exit_json(**{ + 'changed': True, + 'rollback_info': results + }) + + def perform_action(self, resource): + if self.kind == "DaemonSet": + current_revision = resource['metadata']['generation'] + elif self.kind == "Deployment": + current_revision = resource['metadata']['annotations']['deployment.kubernetes.io/revision'] + + managed_resources 
= self.kubernetes_facts(self.managed_resource['kind'], + self.managed_resource['api_version'], + '', + self.namespace, + resource['spec'] + ['selector'] + ['matchLabels'], + '') + + prev_managed_resource = get_previous_revision(managed_resources['resources'], + current_revision) + + if self.kind == "Deployment": + del prev_managed_resource['spec']['template']['metadata']['labels']['pod-template-hash'] + + resource_patch = [{ + "op": "replace", + "path": "/spec/template", + "value": prev_managed_resource['spec']['template'] + }, { + "op": "replace", + "path": "/metadata/annotations", + "value": { + "deployment.kubernetes.io/revision": prev_managed_resource['metadata']['annotations']['deployment.kubernetes.io/revision'] + } + }] + + api_target = 'deployments' + content_type = 'application/json-patch+json' + elif self.kind == "DaemonSet": + resource_patch = prev_managed_resource["data"] + + api_target = 'daemonsets' + content_type = 'application/strategic-merge-patch+json' + + rollback = self.client.request("PATCH", + "/apis/{0}/namespaces/{1}/{2}/{3}" + .format(self.api_version, + self.namespace, + api_target, + self.name), + body=resource_patch, + content_type=content_type) + + result = {'changed': True} + result['method'] = 'patch' + result['body'] = resource_patch + result['resources'] = rollback.to_dict() + return result + + @property + def argspec(self): + args = copy.deepcopy(AUTH_ARG_SPEC) + args.update(NAME_ARG_SPEC) + args.update( + dict( + label_selectors=dict(type='list', elements='str', default=[]), + field_selectors=dict(type='list', elements='str', default=[]), + ) + ) + return args + + +def get_previous_revision(all_resources, current_revision): + for resource in all_resources: + if resource['kind'] == 'ReplicaSet': + if int(resource['metadata'] + ['annotations'] + ['deployment.kubernetes.io/revision']) == int(current_revision) - 1: + return resource + elif resource['kind'] == 'ControllerRevision': + if int(resource['metadata'] + ['annotations'] + 
['deprecated.daemonset.template.generation']) == int(current_revision) - 1: + return resource + return None + + +def main(): + KubernetesRollbackModule().execute_module() + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_scale.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_scale.py new file mode 100644 index 00000000..9e63366a --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_scale.py @@ -0,0 +1,129 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2018, Chris Houseknecht <@chouseknecht> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + + +__metaclass__ = type + + +DOCUMENTATION = r''' + +module: k8s_scale + +short_description: Set a new size for a Deployment, ReplicaSet, Replication Controller, or Job. + +author: + - "Chris Houseknecht (@chouseknecht)" + - "Fabian von Feilitzsch (@fabianvf)" + +description: + - Similar to the kubectl scale command. Use to set the number of replicas for a Deployment, ReplicaSet, + or Replication Controller, or the parallelism attribute of a Job. Supports check mode. 
+ +extends_documentation_fragment: + - community.kubernetes.k8s_name_options + - community.kubernetes.k8s_auth_options + - community.kubernetes.k8s_resource_options + - community.kubernetes.k8s_scale_options + +requirements: + - "python >= 2.7" + - "openshift >= 0.6" + - "PyYAML >= 3.11" +''' + +EXAMPLES = r''' +- name: Scale deployment up, and extend timeout + community.kubernetes.k8s_scale: + api_version: v1 + kind: Deployment + name: elastic + namespace: myproject + replicas: 3 + wait_timeout: 60 + +- name: Scale deployment down when current replicas match + community.kubernetes.k8s_scale: + api_version: v1 + kind: Deployment + name: elastic + namespace: myproject + current_replicas: 3 + replicas: 2 + +- name: Increase job parallelism + community.kubernetes.k8s_scale: + api_version: batch/v1 + kind: job + name: pi-with-timeout + namespace: testing + replicas: 2 + +# Match object using local file or inline definition + +- name: Scale deployment based on a file from the local filesystem + community.kubernetes.k8s_scale: + src: /myproject/elastic_deployment.yml + replicas: 3 + wait: no + +- name: Scale deployment based on a template output + community.kubernetes.k8s_scale: + resource_definition: "{{ lookup('template', '/myproject/elastic_deployment.yml') | from_yaml }}" + replicas: 3 + wait: no + +- name: Scale deployment based on a file from the Ansible controller filesystem + community.kubernetes.k8s_scale: + resource_definition: "{{ lookup('file', '/myproject/elastic_deployment.yml') | from_yaml }}" + replicas: 3 + wait: no +''' + +RETURN = r''' +result: + description: + - If a change was made, will return the patched object, otherwise returns the existing object. + returned: success + type: complex + contains: + api_version: + description: The versioned schema of this representation of an object. + returned: success + type: str + kind: + description: Represents the REST resource this object represents. 
+ returned: success + type: str + metadata: + description: Standard object metadata. Includes name, namespace, annotations, labels, etc. + returned: success + type: complex + spec: + description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind). + returned: success + type: complex + status: + description: Current status details for the object. + returned: success + type: complex + duration: + description: elapsed time of task in seconds + returned: when C(wait) is true + type: int + sample: 48 +''' + +from ansible_collections.community.kubernetes.plugins.module_utils.scale import KubernetesAnsibleScaleModule + + +def main(): + KubernetesAnsibleScaleModule().execute_module() + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_service.py b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_service.py new file mode 100644 index 00000000..0485d710 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/plugins/modules/k8s_service.py @@ -0,0 +1,272 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2018, KubeVirt Team <@kubevirt> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r''' + +module: k8s_service + +short_description: Manage Services on Kubernetes + +author: KubeVirt Team (@kubevirt) + +description: + - Use Openshift Python SDK to manage Services on Kubernetes + +extends_documentation_fragment: + - community.kubernetes.k8s_auth_options + - community.kubernetes.k8s_resource_options + - community.kubernetes.k8s_state_options + +options: + merge_type: + description: + - Whether to override the default patch merge approach with a specific type. By default, the strategic + merge will typically be used. 
+ - For example, Custom Resource Definitions typically aren't updatable by the usual strategic merge. You may + want to use C(merge) if you see "strategic merge patch format is not supported" + - See U(https://kubernetes.io/docs/tasks/run-application/update-api-object-kubectl-patch/#use-a-json-merge-patch-to-update-a-deployment) + - Requires openshift >= 0.6.2 + - If more than one merge_type is given, the merge_types will be tried in order + - If openshift >= 0.6.2, this defaults to C(['strategic-merge', 'merge']), which is ideal for using the same parameters + on resource kinds that combine Custom Resources and built-in resources. For openshift < 0.6.2, the default + is simply C(strategic-merge). + choices: + - json + - merge + - strategic-merge + type: list + elements: str + name: + description: + - Use to specify a Service object name. + required: true + type: str + namespace: + description: + - Use to specify a Service object namespace. + required: true + type: str + type: + description: + - Specifies the type of Service to create. + - See U(https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types) + choices: + - NodePort + - ClusterIP + - LoadBalancer + - ExternalName + type: str + ports: + description: + - A list of ports to expose. + - U(https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services) + type: list + elements: dict + selector: + description: + - Label selectors identify objects this Service should apply to. 
+ - U(https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) + type: dict + apply: + description: + - C(apply) compares the desired resource definition with the previously supplied resource definition, + ignoring properties that are automatically generated + - C(apply) works better with Services than 'force=yes' + - mutually exclusive with C(merge_type) + type: bool + +requirements: + - python >= 2.7 + - openshift >= 0.6.2 +''' + +EXAMPLES = r''' +- name: Expose https port with ClusterIP + community.kubernetes.k8s_service: + state: present + name: test-https + namespace: default + ports: + - port: 443 + protocol: TCP + selector: + key: special + +- name: Expose https port with ClusterIP using spec + community.kubernetes.k8s_service: + state: present + name: test-https + namespace: default + inline: + spec: + ports: + - port: 443 + protocol: TCP + selector: + key: special +''' + +RETURN = r''' +result: + description: + - The created, patched, or otherwise present Service object. Will be empty in the case of a deletion. + returned: success + type: complex + contains: + api_version: + description: The versioned schema of this representation of an object. + returned: success + type: str + kind: + description: Always 'Service'. + returned: success + type: str + metadata: + description: Standard object metadata. Includes name, namespace, annotations, labels, etc. + returned: success + type: complex + spec: + description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind). + returned: success + type: complex + status: + description: Current status details for the object. 
      returned: success
      type: complex
'''

import copy
import traceback

from collections import defaultdict

from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.kubernetes.plugins.module_utils.common import (
    K8sAnsibleMixin, AUTH_ARG_SPEC, COMMON_ARG_SPEC, RESOURCE_ARG_SPEC)


# Module-specific options layered on top of the shared auth/common/resource specs
# inside argspec below.
SERVICE_ARG_SPEC = {
    'apply': {
        'type': 'bool',
        'default': False,
    },
    'name': {'required': True},
    'namespace': {'required': True},
    'merge_type': {'type': 'list', 'elements': 'str', 'choices': ['json', 'merge', 'strategic-merge']},
    'selector': {'type': 'dict'},
    'type': {
        'type': 'str',
        'choices': [
            'NodePort', 'ClusterIP', 'LoadBalancer', 'ExternalName'
        ],
    },
    'ports': {'type': 'list', 'elements': 'dict'},
}


class KubernetesService(K8sAnsibleMixin):
    # Ansible module wrapper that builds a v1 Service definition from module
    # parameters (merged over an optional user-supplied resource definition)
    # and delegates the actual create/patch/delete to the K8sAnsibleMixin.

    def __init__(self, *args, **kwargs):
        # 'resource_definition' and 'src' are two ways to supply the same
        # document; 'apply' and 'merge_type' are competing update strategies.
        mutually_exclusive = [
            ('resource_definition', 'src'),
            ('merge_type', 'apply'),
        ]

        module = AnsibleModule(
            argument_spec=self.argspec,
            mutually_exclusive=mutually_exclusive,
            supports_check_mode=True,
        )

        # Expose the AnsibleModule surface the mixin expects (params, fail,
        # exit) directly on this object.
        self.module = module
        self.check_mode = self.module.check_mode
        self.params = self.module.params
        self.fail_json = self.module.fail_json
        self.fail = self.module.fail_json
        self.exit_json = self.module.exit_json

        super(KubernetesService, self).__init__(*args, **kwargs)

        self.client = None
        self.warnings = []

        self.kind = self.params.get('kind')
        self.api_version = self.params.get('api_version')
        self.name = self.params.get('name')
        self.namespace = self.params.get('namespace')

        # Validates the openshift client version, then normalizes
        # resource_definition/src/inline input into self.resource_definitions.
        self.check_library_version()
        self.set_resource_definitions()

    @staticmethod
    def merge_dicts(x, y):
        # Recursively merge mapping y over mapping x, yielding (key, value)
        # pairs: values from y win on conflicts, except nested dicts, which
        # are merged rather than replaced.  Consumed as dict(merge_dicts(...)).
        for k in set(x.keys()).union(y.keys()):
            if k in x and k in y:
                if isinstance(x[k], dict) and isinstance(y[k], dict):
                    yield (k, dict(KubernetesService.merge_dicts(x[k], y[k])))
                else:
                    yield (k, y[k])
            elif k in x:
                yield (k, x[k])
            else:
                yield (k, y[k])

    @property
    def argspec(self):
        """ argspec property builder """
        argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
        argument_spec.update(COMMON_ARG_SPEC)
        argument_spec.update(RESOURCE_ARG_SPEC)
        argument_spec.update(SERVICE_ARG_SPEC)
        return argument_spec

    def execute_module(self):
        """ Module execution """
        self.client = self.get_api_client()

        # Services are always core/v1 objects regardless of any api_version
        # parameter the shared spec accepts.
        api_version = 'v1'
        selector = self.params.get('selector')
        service_type = self.params.get('type')
        ports = self.params.get('ports')

        # defaultdict(defaultdict) lets definition['spec'] / ['metadata'] be
        # populated without pre-creating the nested mappings.
        definition = defaultdict(defaultdict)

        definition['kind'] = 'Service'
        definition['apiVersion'] = api_version

        def_spec = definition['spec']
        def_spec['type'] = service_type
        def_spec['ports'] = ports
        def_spec['selector'] = selector

        def_meta = definition['metadata']
        def_meta['name'] = self.params.get('name')
        def_meta['namespace'] = self.params.get('namespace')

        # 'resource_definition:' has lower priority than module parameters
        definition = dict(self.merge_dicts(self.resource_definitions[0], definition))

        resource = self.find_resource('Service', api_version, fail=True)
        definition = self.set_defaults(resource, definition)
        result = self.perform_action(resource, definition)

        self.exit_json(**result)


def main():
    # Any unexpected exception is converted into a module failure with the
    # full traceback attached for debugging.
    module = KubernetesService()
    try:
        module.execute_module()
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/requirements.txt b/collections-debian-merged/ansible_collections/community/kubernetes/requirements.txt
new file mode 100644
index 00000000..6ed70a79
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/requirements.txt
@@ -0,0 +1,2 @@
openshift>=0.6.2
requests-oauthlib
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/setup.cfg b/collections-debian-merged/ansible_collections/community/kubernetes/setup.cfg
new file mode 100644
index
00000000..29c924b8 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/setup.cfg @@ -0,0 +1,3 @@ +[flake8] +max-line-length = 160 +ignore = W503,E402 diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/README.md b/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/README.md new file mode 100644 index 00000000..2a09ca17 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/README.md @@ -0,0 +1,19 @@ +Wait tests +---------- + +wait tests require at least one node, and don't work on the normal k8s +openshift-origin container as provided by ansible-test --docker -v k8s + +minikube, Kubernetes from Docker or any other Kubernetes service will +suffice. + +If kubectl is already using the right config file and context, you can +just do + +``` +cd tests/integration/targets/k8s +./runme.sh -vv +``` + +otherwise set one or both of `K8S_AUTH_KUBECONFIG` and `K8S_AUTH_CONTEXT` +and use the same command diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/aliases b/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/aliases new file mode 100644 index 00000000..326e6910 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/aliases @@ -0,0 +1,2 @@ +cloud/openshift +shippable/cloud/group1 diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/defaults/main.yml b/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/defaults/main.yml new file mode 100644 index 00000000..e46ca26f --- /dev/null +++ 
b/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/defaults/main.yml @@ -0,0 +1,2 @@ +--- +k8s_openshift: true diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/files/crd-resource.yml b/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/files/crd-resource.yml new file mode 100644 index 00000000..23d0663c --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/files/crd-resource.yml @@ -0,0 +1,21 @@ +--- +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Certificate +metadata: + name: acme-crt +spec: + secretName: acme-crt-secret + dnsNames: + - foo.example.com + - bar.example.com + acme: + config: + - ingressClass: nginx + domains: + - foo.example.com + - bar.example.com + issuerRef: + name: letsencrypt-prod + # We can reference ClusterIssuers by changing the kind here. + # The default value is Issuer (i.e. 
a locally namespaced Issuer) + kind: Issuer diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/files/kuard-extra-property.yml b/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/files/kuard-extra-property.yml new file mode 100644 index 00000000..bed92bc7 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/files/kuard-extra-property.yml @@ -0,0 +1,22 @@ +--- +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + labels: + app: kuard + name: kuard + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + app: kuard + unwanted: value + template: + metadata: + labels: + app: kuard + spec: + containers: + - image: gcr.io/kuar-demo/kuard-amd64:1 + name: kuard diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/files/kuard-invalid-type.yml b/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/files/kuard-invalid-type.yml new file mode 100644 index 00000000..72505f88 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/files/kuard-invalid-type.yml @@ -0,0 +1,21 @@ +--- +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + labels: + app: kuard + name: kuard + namespace: default +spec: + replicas: hello + selector: + matchLabels: + app: kuard + template: + metadata: + labels: + app: kuard + spec: + containers: + - image: gcr.io/kuar-demo/kuard-amd64:1 + name: kuard diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/files/setup-crd.yml b/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/files/setup-crd.yml new file mode 100644 index 00000000..9c01bc1a --- 
/dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/files/setup-crd.yml @@ -0,0 +1,15 @@ +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: certificates.certmanager.k8s.io +spec: + group: certmanager.k8s.io + version: v1alpha1 + scope: Namespaced + names: + kind: Certificate + plural: certificates + shortNames: + - cert + - certs diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/handlers/main.yml b/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/handlers/main.yml new file mode 100644 index 00000000..efb5408e --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/handlers/main.yml @@ -0,0 +1,6 @@ +--- +- name: delete temporary directory + file: + path: "{{ remote_tmp_dir }}" + state: absent + no_log: yes diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/library/README.md b/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/library/README.md new file mode 100644 index 00000000..ac312297 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/library/README.md @@ -0,0 +1,3 @@ +# README + +The `test_tempfile.py` module added here is only used for the `setup_remote_tmp_dir.yml` temporary directory setup task. It is a clone of the `tempfile.py` community-supported Ansible module, and has to be included with the tests here because it is not available in the `ansible-base` distribution against which this collection is tested. 
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/library/test_tempfile.py b/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/library/test_tempfile.py new file mode 100644 index 00000000..c89f5a31 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/library/test_tempfile.py @@ -0,0 +1,121 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Krzysztof Magosa <krzysztof@magosa.pl> +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: test_tempfile + +short_description: Creates temporary files and directories + +description: + - The C(test_tempfile) module creates temporary files and directories. C(mktemp) command takes different parameters on various systems, this module helps + to avoid troubles related to that. Files/directories created by module are accessible only by creator. In case you need to make them world-accessible + you need to use M(ansible.builtin.file) module. + - For Windows targets, use the M(ansible.builtin.win_tempfile) module instead. + +options: + state: + description: + - Whether to create file or directory. + type: str + choices: [ directory, file ] + default: file + path: + description: + - Location where temporary file or directory should be created. + - If path is not specified, the default system temporary directory will be used. + type: path + prefix: + description: + - Prefix of file/directory name created by module. + type: str + default: ansible. + suffix: + description: + - Suffix of file/directory name created by module. 
+ type: str + default: "" + +seealso: +- module: file +- module: win_tempfile + +author: + - Krzysztof Magosa (@krzysztof-magosa) +''' + +EXAMPLES = """ +- name: create temporary build directory + test_tempfile: + state: directory + suffix: build + +- name: create temporary file + test_tempfile: + state: file + suffix: temp + register: tempfile_1 + +- name: use the registered var and the file module to remove the temporary file + file: + path: "{{ tempfile_1.path }}" + state: absent + when: tempfile_1.path is defined +""" + +RETURN = ''' +path: + description: Path to created file or directory + returned: success + type: str + sample: "/tmp/ansible.bMlvdk" +''' + +from os import close +from tempfile import mkstemp, mkdtemp +from traceback import format_exc + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(type='str', default='file', choices=['file', 'directory']), + path=dict(type='path'), + prefix=dict(type='str', default='ansible.'), + suffix=dict(type='str', default=''), + ), + ) + + try: + if module.params['state'] == 'file': + handle, path = mkstemp( + prefix=module.params['prefix'], + suffix=module.params['suffix'], + dir=module.params['path'], + ) + close(handle) + elif module.params['state'] == 'directory': + path = mkdtemp( + prefix=module.params['prefix'], + suffix=module.params['suffix'], + dir=module.params['path'], + ) + + module.exit_json(changed=True, path=path) + except Exception as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/meta/main.yml b/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/meta/main.yml new file mode 100644 index 00000000..23d65c7e --- /dev/null +++ 
b/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/meta/main.yml @@ -0,0 +1,2 @@ +--- +dependencies: [] diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/tasks/main.yml b/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/tasks/main.yml new file mode 100644 index 00000000..b50df180 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/tasks/main.yml @@ -0,0 +1,81 @@ +--- +- include_tasks: setup_remote_tmp_dir.yml + +- set_fact: + virtualenv: "{{ remote_tmp_dir }}/virtualenv" + virtualenv_command: "{{ ansible_python_interpreter }} -m virtualenv" + +- set_fact: + virtualenv_interpreter: "{{ virtualenv }}/bin/python" + +- pip: + name: virtualenv + +# Test graceful failure for missing kubernetes-validate + +- pip: + name: + - openshift>=0.9.2 + - coverage + virtualenv: "{{ virtualenv }}" + virtualenv_command: "{{ virtualenv_command }}" + virtualenv_site_packages: no + +- include_tasks: validate_not_installed.yml + vars: + ansible_python_interpreter: "{{ virtualenv_interpreter }}" + +- file: + path: "{{ virtualenv }}" + state: absent + no_log: yes + +# Test validate with kubernetes-validate + +- pip: + name: + - kubernetes-validate==1.12.0 + - openshift>=0.9.2 + - coverage + virtualenv: "{{ virtualenv }}" + virtualenv_command: "{{ virtualenv_command }}" + virtualenv_site_packages: no + +- include_tasks: validate_installed.yml + vars: + ansible_python_interpreter: "{{ virtualenv_interpreter }}" + playbook_namespace: ansible-test-k8s-validate + +- file: + path: "{{ virtualenv }}" + state: absent + no_log: yes + +# Test graceful failure for older versions of openshift + +- pip: + name: + - openshift==0.6.0 + - kubernetes==6.0.0 + - coverage + virtualenv: "{{ virtualenv }}" + virtualenv_command: "{{ virtualenv_command }}" + 
virtualenv_site_packages: no + +- include_tasks: older_openshift_fail.yml + vars: + ansible_python_interpreter: "{{ virtualenv_interpreter }}" + playbook_namespace: ansible-test-k8s-older-openshift + +- file: + path: "{{ virtualenv }}" + state: absent + no_log: yes + +# Test openshift + +- debug: + var: k8s_openshift + +- include: openshift.yml + when: k8s_openshift | bool diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/tasks/older_openshift_fail.yml b/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/tasks/older_openshift_fail.yml new file mode 100644 index 00000000..213d2357 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/tasks/older_openshift_fail.yml @@ -0,0 +1,71 @@ +--- +# TODO: Not available in ansible-base +# - python_requirements_info: +# dependencies: +# - openshift==0.6.0 +# - kubernetes==6.0.0 + +# append_hash +- name: use append_hash with ConfigMap + k8s: + definition: + metadata: + name: config-map-test + namespace: "{{ playbook_namespace }}" + apiVersion: v1 + kind: ConfigMap + data: + hello: world + append_hash: yes + ignore_errors: yes + register: k8s_append_hash + +- name: assert that append_hash fails gracefully + assert: + that: + - k8s_append_hash is failed + - "'Failed to import the required Python library (openshift >= 0.7.2)' in k8s_append_hash.msg" + - "'. This is required for append_hash.' 
in k8s_append_hash.msg" + +# validate +- name: attempt to use validate with older openshift + k8s: + definition: + metadata: + name: config-map-test + namespace: "{{ playbook_namespace }}" + apiVersion: v1 + kind: ConfigMap + data: + hello: world + validate: + fail_on_error: yes + ignore_errors: yes + register: k8s_validate + +- name: assert that validate fails gracefully + assert: + that: + - k8s_validate is failed + - "k8s_validate.msg == 'openshift >= 0.8.0 is required for validate'" + +# apply +- name: attempt to use apply with older openshift + k8s: + definition: + metadata: + name: config-map-test + namespace: "{{ playbook_namespace }}" + apiVersion: v1 + kind: ConfigMap + data: + hello: world + apply: yes + ignore_errors: yes + register: k8s_apply + +- name: assert that apply fails gracefully + assert: + that: + - k8s_apply is failed + - "k8s_apply.msg.startswith('Failed to import the required Python library (openshift >= 0.9.2)')" diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/tasks/openshift.yml b/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/tasks/openshift.yml new file mode 100644 index 00000000..af0f51a7 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/tasks/openshift.yml @@ -0,0 +1,62 @@ +--- +# OpenShift Resources +- name: Create a project + k8s: + name: testing + kind: Project + api_version: v1 + apply: no + register: output + +- name: show output + debug: + var: output + +- name: Create deployment config + k8s: + state: present + inline: &dc + apiVersion: v1 + kind: DeploymentConfig + metadata: + name: elastic + labels: + app: galaxy + service: elastic + namespace: testing + spec: + template: + metadata: + labels: + app: galaxy + service: elastic + spec: + containers: + - name: elastic + volumeMounts: + - mountPath: /usr/share/elasticsearch/data + 
name: elastic-volume + command: ['elasticsearch'] + image: 'ansible/galaxy-elasticsearch:2.4.6' + volumes: + - name: elastic-volume + persistentVolumeClaim: + claimName: elastic-volume + replicas: 1 + strategy: + type: Rolling + register: output + +- name: Show output + debug: + var: output + +- name: Create deployment config again + k8s: + state: present + inline: *dc + register: output + +- name: DC creation should be idempotent + assert: + that: not output.changed diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/tasks/setup_remote_tmp_dir.yml b/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/tasks/setup_remote_tmp_dir.yml new file mode 100644 index 00000000..8acdb49e --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/tasks/setup_remote_tmp_dir.yml @@ -0,0 +1,12 @@ +--- +- name: create temporary directory + test_tempfile: + state: directory + suffix: .test + register: remote_tmp_dir + notify: + - delete temporary directory + +- name: record temporary directory + set_fact: + remote_tmp_dir: "{{ remote_tmp_dir.path }}" diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/tasks/validate_installed.yml b/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/tasks/validate_installed.yml new file mode 100644 index 00000000..e00c7c23 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/tasks/validate_installed.yml @@ -0,0 +1,126 @@ +--- +- block: + - name: Create a namespace + k8s: + name: "{{ playbook_namespace }}" + kind: Namespace + + - copy: + src: files + dest: "{{ remote_tmp_dir }}" + + - name: incredibly simple ConfigMap + k8s: + definition: + apiVersion: v1 + kind: ConfigMap + metadata: + 
name: hello + namespace: "{{ playbook_namespace }}" + validate: + fail_on_error: yes + register: k8s_with_validate + + - name: assert that k8s_with_validate succeeds + assert: + that: + - k8s_with_validate is successful + + - name: extra property does not fail without strict + k8s: + src: "{{ remote_tmp_dir }}/files/kuard-extra-property.yml" + namespace: "{{ playbook_namespace }}" + validate: + fail_on_error: yes + strict: no + + - name: extra property fails with strict + k8s: + src: "{{ remote_tmp_dir }}/files/kuard-extra-property.yml" + namespace: "{{ playbook_namespace }}" + validate: + fail_on_error: yes + strict: yes + ignore_errors: yes + register: extra_property + + - name: check that extra property fails with strict + assert: + that: + - extra_property is failed + + - name: invalid type fails at validation stage + k8s: + src: "{{ remote_tmp_dir }}/files/kuard-invalid-type.yml" + namespace: "{{ playbook_namespace }}" + validate: + fail_on_error: yes + strict: no + ignore_errors: yes + register: invalid_type + + - name: check that invalid type fails + assert: + that: + - invalid_type is failed + + - name: invalid type fails with warnings when fail_on_error is False + k8s: + src: "{{ remote_tmp_dir }}/files/kuard-invalid-type.yml" + namespace: "{{ playbook_namespace }}" + validate: + fail_on_error: no + strict: no + ignore_errors: yes + register: invalid_type_no_fail + + - name: check that invalid type fails + assert: + that: + - invalid_type_no_fail is failed + + - name: setup custom resource definition + k8s: + src: "{{ remote_tmp_dir }}/files/setup-crd.yml" + + - name: wait a few seconds + pause: + seconds: 5 + + - name: add custom resource definition + k8s: + src: "{{ remote_tmp_dir }}/files/crd-resource.yml" + namespace: "{{ playbook_namespace }}" + validate: + fail_on_error: yes + strict: yes + register: unknown_kind + + - name: check that unknown kind warns + assert: + that: + - unknown_kind is successful + - "'warnings' in unknown_kind" + + always: + - 
name: remove custom resource + k8s: + definition: "{{ lookup('file', role_path + '/files/crd-resource.yml') }}" + namespace: "{{ playbook_namespace }}" + state: absent + ignore_errors: yes + + - name: remove custom resource definitions + k8s: + definition: "{{ lookup('file', role_path + '/files/setup-crd.yml') }}" + state: absent + + - name: Delete namespace + k8s: + state: absent + definition: + - kind: Namespace + apiVersion: v1 + metadata: + name: "{{ playbook_namespace }}" + ignore_errors: yes diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/tasks/validate_not_installed.yml b/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/tasks/validate_not_installed.yml new file mode 100644 index 00000000..aeda2522 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/tests/integration/targets/kubernetes/tasks/validate_not_installed.yml @@ -0,0 +1,25 @@ +--- +# TODO: Not available in ansible-base +# - python_requirements_info: +# dependencies: +# - openshift +# - kubernetes +# - kubernetes-validate + +- k8s: + definition: + apiVersion: v1 + kind: ConfigMap + metadata: + name: hello + namespace: default + validate: + fail_on_error: yes + ignore_errors: yes + register: k8s_no_validate + +- name: assert that k8s_no_validate fails gracefully + assert: + that: + - k8s_no_validate is failed + - "k8s_no_validate.msg == 'kubernetes-validate python library is required to validate resources'" diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/tests/sanity/ignore-2.10.txt b/collections-debian-merged/ansible_collections/community/kubernetes/tests/sanity/ignore-2.10.txt new file mode 100644 index 00000000..3f72c0e6 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/tests/sanity/ignore-2.10.txt @@ -0,0 +1,8 @@ +plugins/modules/helm.py 
validate-modules:parameter-type-not-in-doc +plugins/modules/helm_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/k8s.py validate-modules:parameter-type-not-in-doc +plugins/modules/k8s.py validate-modules:return-syntax-error +plugins/modules/k8s_scale.py validate-modules:parameter-type-not-in-doc +plugins/modules/k8s_scale.py validate-modules:return-syntax-error +plugins/modules/k8s_service.py validate-modules:return-syntax-error +plugins/modules/k8s_service.py validate-modules:parameter-type-not-in-doc diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/tests/sanity/ignore-2.11.txt b/collections-debian-merged/ansible_collections/community/kubernetes/tests/sanity/ignore-2.11.txt new file mode 100644 index 00000000..326af051 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/tests/sanity/ignore-2.11.txt @@ -0,0 +1,6 @@ +plugins/modules/k8s.py validate-modules:parameter-type-not-in-doc +plugins/modules/k8s.py validate-modules:return-syntax-error +plugins/modules/k8s_scale.py validate-modules:parameter-type-not-in-doc +plugins/modules/k8s_scale.py validate-modules:return-syntax-error +plugins/modules/k8s_service.py validate-modules:return-syntax-error +plugins/modules/k8s_service.py validate-modules:parameter-type-not-in-doc diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/tests/sanity/ignore-2.9.txt b/collections-debian-merged/ansible_collections/community/kubernetes/tests/sanity/ignore-2.9.txt new file mode 100644 index 00000000..9739b5d8 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/kubernetes/tests/sanity/ignore-2.9.txt @@ -0,0 +1,5 @@ +plugins/modules/helm.py validate-modules:parameter-type-not-in-doc +plugins/modules/helm_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/k8s.py validate-modules:parameter-type-not-in-doc +plugins/modules/k8s_scale.py validate-modules:parameter-type-not-in-doc 
+plugins/modules/k8s_service.py validate-modules:parameter-type-not-in-doc
diff --git a/collections-debian-merged/ansible_collections/community/kubernetes/utils/downstream.sh b/collections-debian-merged/ansible_collections/community/kubernetes/utils/downstream.sh
new file mode 100755
index 00000000..f96cf062
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/kubernetes/utils/downstream.sh
@@ -0,0 +1,227 @@
#!/bin/bash -eu

# Script to dual-home the upstream and downstream Collection in a single repo
#
# This script will build or test a downstream collection, removing any
# upstream components that will not ship in the downstream release
#
# NOTES:
# - All functions are prefixed with f_ so it's obvious where they come
#   from when in use throughout the script

DOWNSTREAM_VERSION="1.1.1"
# Set to any non-empty value to keep the temporary build tree for debugging.
# (The previous default was the two-character literal '' — never empty — so
# the "keep" test could not be switched off; default to a truly empty string.)
KEEP_DOWNSTREAM_TMPDIR="${KEEP_DOWNSTREAM_TMPDIR:-}"


# Print one informational log line prefixed with the script name.
f_log_info()
{
    # The format string already ends the line; passing "${1}\n" through %s
    # used to print a literal backslash-n.
    printf "%s:LOG:INFO: %s\n" "${0}" "${1}"
}

# Populate the build manifests and create the temporary build tree.
f_prep()
{
    f_log_info "${FUNCNAME[0]}"
    # Array of excluded files from downstream build (relative path)
    _file_exclude=(
    )

    # Files to copy downstream (relative repo root dir path)
    _file_manifest=(
        CHANGELOG.rst
        galaxy.yml
        LICENSE
        README.md
        Makefile
        setup.cfg
        .yamllint
    )

    # Directories to recursively copy downstream (relative repo root dir path)
    _dir_manifest=(
        changelogs
        meta
        plugins
        tests
        molecule
    )

    # Temp build dir
    _tmp_dir=$(mktemp -d)
    _build_dir="${_tmp_dir}/ansible_collections/kubernetes/core"
    mkdir -p "${_build_dir}"
}

# Print usage for the supported single-letter options.
f_show_help()
{
    printf "Usage: downstream.sh [OPTION]\n"
    printf "\t-s\t\tCreate a temporary downstream release and perform sanity tests.\n"
    printf "\t-i\t\tCreate a temporary downstream release and perform integration tests.\n"
    printf "\t-m\t\tCreate a temporary downstream release and perform molecule tests.\n"
    printf "\t-b\t\tCreate a downstream release and stage for release.\n"
    printf "\t-r\t\tCreate a downstream release and publish release.\n"
}

# Rewrite upstream (community.kubernetes) names to the downstream
# (kubernetes.core) equivalents throughout the build tree.
f_text_sub()
{
    # Switch FQCN and dependent components
    sed -i.bak "s/community-kubernetes/kubernetes-core/" "${_build_dir}/Makefile"
    sed -i.bak "s/community\/kubernetes/kubernetes\/core/" "${_build_dir}/Makefile"
    sed -i.bak "s/^VERSION\:/VERSION: ${DOWNSTREAM_VERSION}/" "${_build_dir}/Makefile"
    sed -i.bak "s/community.kubernetes/kubernetes.core/" "${_build_dir}/galaxy.yml"
    sed -i.bak "s/name\:.*$/name: core/" "${_build_dir}/galaxy.yml"
    sed -i.bak "s/namespace\:.*$/namespace: kubernetes/" "${_build_dir}/galaxy.yml"
    sed -i.bak "s/^version\:.*$/version: ${DOWNSTREAM_VERSION}/" "${_build_dir}/galaxy.yml"
    find "${_build_dir}" -type f -exec sed -i.bak "s/community\.kubernetes/kubernetes\.core/g" {} \;
    sed -i.bak "s/a\.k\.a\. \`kubernetes\.core\`/formerly known as \`community\.kubernetes\`/" "${_build_dir}/README.md";
    find "${_build_dir}" -type f -name "*.bak" -delete
}

# Remove the temporary build tree unless the caller asked to keep it.
f_cleanup()
{
    f_log_info "${FUNCNAME[0]}"
    # The original test was inverted (it deleted the tree only when
    # KEEP_DOWNSTREAM_TMPDIR was set); also tolerate the old literal ''
    # default so existing callers see unchanged behaviour.
    if [[ -z ${KEEP_DOWNSTREAM_TMPDIR} || ${KEEP_DOWNSTREAM_TMPDIR} == "''" ]]; then
        # Guard with :- so this is safe under `set -u` when f_exit is
        # reached before f_prep has run (e.g. the no-arguments path).
        if [[ -d ${_tmp_dir:-} ]]; then
            # Remove the whole mktemp root, not just the nested build dir,
            # so no empty scaffolding directories are left behind.
            rm -fr "${_tmp_dir}"
        fi
    fi
}

# Exit with the status supplied by the caller (default 0) after cleanup.
# (The original used "$0" — the script name — which bash rejects as a
# non-numeric exit status.)
f_exit()
{
    f_cleanup
    exit "${1:-0}"
}

# Copy the manifested files/directories into the build tree and drop any
# explicitly excluded files.
f_create_collection_dir_structure()
{
    f_log_info "${FUNCNAME[0]}"
    # Create the Collection
    for f_name in "${_file_manifest[@]}";
    do
        cp "./${f_name}" "${_build_dir}/${f_name}"
    done
    for d_name in "${_dir_manifest[@]}";
    do
        cp -r "./${d_name}" "${_build_dir}/${d_name}"
    done
    if [ -n "${_file_exclude:-}" ]; then
        for exclude_file in "${_file_exclude[@]}";
        do
            if [[ -f "${_build_dir}/${exclude_file}" ]]; then
                rm -f "${_build_dir}/${exclude_file}"
            fi
        done
    fi
}

# Copy the built collection artifact back into the invoking directory.
f_copy_collection_to_working_dir()
{
    f_log_info "${FUNCNAME[0]}"
    # Copy the Collection build result into original working dir
    cp "${_build_dir}"/*.tar.gz ./
}

# Shared prep pipeline: build tree, populate it, then rewrite names.
f_common_steps()
{
    f_log_info "${FUNCNAME[0]}"
    f_prep
    f_create_collection_dir_structure
    f_text_sub
}

# Run the test sanity scenario
# Build a temporary downstream tree and run the sanity test suite in it.
f_test_sanity_option()
{
    f_log_info "${FUNCNAME[0]}"
    f_common_steps
    pushd "${_build_dir}" || return
    f_log_info "SANITY TEST PWD: ${PWD}"
    make test-sanity
    popd || return
    f_cleanup
}

# Run the test integration scenario
f_test_integration_option()
{
    f_log_info "${FUNCNAME[0]}"
    f_common_steps
    pushd "${_build_dir}" || return
    f_log_info "INTEGRATION TEST WD: ${PWD}"
    make test-integration
    popd || return
    f_cleanup
}

# Run the molecule tests
f_test_molecule_option()
{
    f_log_info "${FUNCNAME[0]}"
    f_common_steps
    pushd "${_build_dir}" || return
    f_log_info "MOLECULE TEST WD: ${PWD}"
    make test-molecule
    popd || return
    f_cleanup
}

# Run the release scenario
f_release_option()
{
    f_log_info "${FUNCNAME[0]}"
    f_common_steps
    pushd "${_build_dir}" || return
    f_log_info "RELEASE WD: ${PWD}"
    make release
    popd || return
    f_cleanup
}

# Run the build scenario
f_build_option()
{
    f_log_info "${FUNCNAME[0]}"
    f_common_steps
    pushd "${_build_dir}" || return
    f_log_info "BUILD WD: ${PWD}"
    make build
    popd || return
    f_copy_collection_to_working_dir
    f_cleanup
}

# If no options are passed, display usage and exit
if [[ "${#}" -eq "0" ]]; then
    f_show_help
    f_exit 0
fi

# Handle options
while getopts ":simrb" option
do
    case $option in
        s)
            f_test_sanity_option
            ;;
        i)
            f_test_integration_option
            ;;
        m)
            f_test_molecule_option
            ;;
        r)
            f_release_option
            ;;
        b)
            f_build_option
            ;;
        *)
            # Diagnostics belong on stderr, not stdout.
            printf "ERROR: Unimplemented option chosen.\n" >&2
            f_show_help
            f_exit 1
            ;;   # Default.
    esac
done

# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4