From 3667197efb7b18ec842efd504785965911f8ac4b Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Wed, 5 Jun 2024 18:18:34 +0200
Subject: Adding upstream version 10.0.0+dfsg.

Signed-off-by: Daniel Baumann
---
 .../community/okd/.config/ansible-lint.yml         |   5 +
 .../community/okd/.github/patchback.yml            |   4 +
 .../community/okd/.github/settings.yml             |   6 +
 .../community/okd/.github/stale.yml                |  60 ++
 .../community/okd/.github/workflows/changelog.yml  |  23 +
 .../community/okd/.github/workflows/linters.yml    |  29 +
 .../okd/.github/workflows/sanity-tests.yml         |  23 +
 .../community/okd/.github/workflows/unit-tests.yml |  21 +
 ansible_collections/community/okd/.yamllint        |  44 +-
 ansible_collections/community/okd/CHANGELOG.rst    |  28 +
 ansible_collections/community/okd/FILES.json       | 782 ++++++++++--------
 ansible_collections/community/okd/MANIFEST.json    |   6 +-
 ansible_collections/community/okd/Makefile         |   4 +-
 ansible_collections/community/okd/OWNERS_ALIASES   |   8 -
 ansible_collections/community/okd/README.md        |  33 +-
 .../community/okd/changelogs/.plugin-cache.yaml    |  92 ---
 .../community/okd/changelogs/changelog.yaml        |  25 +
 .../community/okd/changelogs/config.yaml           |  32 +-
 ansible_collections/community/okd/ci/Dockerfile    |  20 +-
 ansible_collections/community/okd/ci/downstream.sh |  11 +-
 .../okd/docs/community.okd.k8s_module.rst          |  72 +-
 .../okd/docs/community.okd.oc_connection.rst       |   2 +-
 ...munity.okd.openshift_adm_groups_sync_module.rst | 107 ++-
 ...shift_adm_migrate_template_instances_module.rst |  67 +-
 ...mmunity.okd.openshift_adm_prune_auth_module.rst |  55 ++
 ...unity.okd.openshift_adm_prune_builds_module.rst |   2 +-
 ....okd.openshift_adm_prune_deployments_module.rst |  57 +-
 ...unity.okd.openshift_adm_prune_images_module.rst |  55 ++
 .../docs/community.okd.openshift_auth_module.rst   |  57 +-
 ...community.okd.openshift_import_image_module.rst |  55 ++
 .../okd/docs/community.okd.openshift_inventory.rst |  47 +-
 .../community.okd.openshift_process_module.rst     |  66 +-
 ...ommunity.okd.openshift_registry_info_module.rst |  57 +-
 .../docs/community.okd.openshift_route_module.rst  |  67 +-
 ansible_collections/community/okd/meta/runtime.yml |   9 +-
 .../community/okd/molecule/default/converge.yml    |  14 +-
 .../okd/molecule/default/files/pod-template.yaml   |  16 +-
 .../molecule/default/files/simple-template.yaml    |  36 +-
 .../community/okd/molecule/default/molecule.yml    |   9 +-
 .../community/okd/molecule/default/prepare.yml     |  12 +-
 .../library/openshift_ldap_entry_info.py           |   1 +
 .../openshift_adm_groups/tasks/activeDirectory.yml | 444 +++++-----
 .../tasks/augmentedActiveDirectory.yml             | 323 ++++----
 .../roles/openshift_adm_groups/tasks/main.yml      |  47 +-
 .../tasks/python-ldap-not-installed.yml            |   1 +
 .../roles/openshift_adm_groups/tasks/rfc2307.yml   | 907 +++++++++++----------
 .../openshift_adm_prune_auth_clusterroles.yml      | 575 ++++++-------
 .../tasks/openshift_adm_prune_auth_roles.yml       | 653 +++++++--------
 .../tasks/openshift_adm_prune_deployments.yml      | 438 +++++-----
 .../molecule/default/tasks/openshift_builds.yml    | 477 +++++------
 .../default/tasks/openshift_import_images.yml      | 335 ++++----
 .../default/tasks/openshift_prune_images.yml       |  14 +-
 .../okd/molecule/default/tasks/openshift_route.yml |  12 +-
 .../community/okd/molecule/default/vars/main.yml   |  22 +-
 .../community/okd/plugins/connection/oc.py         |  36 +-
 .../community/okd/plugins/inventory/openshift.py   | 137 ++--
 .../community/okd/plugins/module_utils/k8s.py      | 251 ++++--
 .../module_utils/openshift_adm_prune_auth.py       | 301 ++++---
 .../openshift_adm_prune_deployments.py             |  55 +-
 .../module_utils/openshift_adm_prune_images.py     | 130 +--
 .../okd/plugins/module_utils/openshift_builds.py   | 159 ++--
 .../okd/plugins/module_utils/openshift_common.py   |  18 +-
 .../plugins/module_utils/openshift_docker_image.py |  59 +-
 .../okd/plugins/module_utils/openshift_groups.py   | 209 +++--
 .../module_utils/openshift_images_common.py        | 138 ++--
 .../plugins/module_utils/openshift_import_image.py | 134 +--
 .../okd/plugins/module_utils/openshift_ldap.py     | 364 +++++----
 .../okd/plugins/module_utils/openshift_process.py  |  14 +-
 .../okd/plugins/module_utils/openshift_registry.py |  74 +-
 .../community/okd/plugins/modules/k8s.py           |  79 +-
 .../plugins/modules/openshift_adm_groups_sync.py   |  73 +-
 .../openshift_adm_migrate_template_instances.py    |  28 +-
 .../plugins/modules/openshift_adm_prune_auth.py    |  43 +-
 .../plugins/modules/openshift_adm_prune_builds.py  |  36 +-
 .../modules/openshift_adm_prune_deployments.py     |  39 +-
 .../plugins/modules/openshift_adm_prune_images.py  |  44 +-
 .../okd/plugins/modules/openshift_auth.py          | 237 +++---
 .../okd/plugins/modules/openshift_build.py         |  66 +-
 .../okd/plugins/modules/openshift_import_image.py  |  41 +-
 .../okd/plugins/modules/openshift_process.py       |  46 +-
 .../okd/plugins/modules/openshift_registry_info.py |  35 +-
 .../okd/plugins/modules/openshift_route.py         | 299 ++++---
 ansible_collections/community/okd/requirements.yml |   3 +-
 ansible_collections/community/okd/setup.cfg        |   3 -
 .../community/okd/test-requirements.txt            |   1 +
 ansible_collections/community/okd/tests/config.yml |   3 +-
 .../community/okd/tests/sanity/ignore-2.16.txt     |   3 +
 .../community/okd/tests/sanity/ignore-2.17.txt     |   3 +
 .../community/okd/tests/sanity/requirements.yml    |   5 +
 .../unit/plugins/module_utils/test_ldap_dn.py      |  42 +-
 .../plugins/module_utils/test_ldap_sync_config.py  |  30 +-
 .../module_utils/test_openshift_docker_image.py    |  74 +-
 ansible_collections/community/okd/tox.ini          |  37 +
 93 files changed, 5595 insertions(+), 4121 deletions(-)
 create mode 100644 ansible_collections/community/okd/.config/ansible-lint.yml
 create mode 100644 ansible_collections/community/okd/.github/patchback.yml
 create mode 100644 ansible_collections/community/okd/.github/settings.yml
 create mode 100644 ansible_collections/community/okd/.github/stale.yml
 create mode 100644 ansible_collections/community/okd/.github/workflows/changelog.yml
 create mode 100644 ansible_collections/community/okd/.github/workflows/linters.yml
 create mode 100644 ansible_collections/community/okd/.github/workflows/sanity-tests.yml
 create mode 100644 ansible_collections/community/okd/.github/workflows/unit-tests.yml
 delete mode 100644 ansible_collections/community/okd/changelogs/.plugin-cache.yaml
 delete mode 100644 ansible_collections/community/okd/setup.cfg
 create mode 100644 ansible_collections/community/okd/tests/sanity/ignore-2.16.txt
 create mode 100644 ansible_collections/community/okd/tests/sanity/ignore-2.17.txt
 create mode 100644 ansible_collections/community/okd/tests/sanity/requirements.yml
 create mode 100644 ansible_collections/community/okd/tox.ini

diff --git a/ansible_collections/community/okd/.config/ansible-lint.yml b/ansible_collections/community/okd/.config/ansible-lint.yml
new file mode 100644
index 000000000..7c92b22ab
--- /dev/null
+++ b/ansible_collections/community/okd/.config/ansible-lint.yml
@@ -0,0 +1,5 @@
+---
+profile: production
+exclude_paths:
+  - molecule
+  - tests/sanity
diff --git a/ansible_collections/community/okd/.github/patchback.yml b/ansible_collections/community/okd/.github/patchback.yml
new file mode 100644
index 000000000..113fc5294
--- /dev/null
+++ b/ansible_collections/community/okd/.github/patchback.yml
@@ -0,0 +1,4 @@
+---
+backport_branch_prefix: patchback/backports/
+backport_label_prefix: backport-
+target_branch_prefix: stable-
diff --git a/ansible_collections/community/okd/.github/settings.yml b/ansible_collections/community/okd/.github/settings.yml
new file mode 100644
index 000000000..35f78e110
--- /dev/null
+++ b/ansible_collections/community/okd/.github/settings.yml
@@ -0,0 +1,6 @@
+---
+# DO NOT MODIFY
+
+# Settings: https://probot.github.io/apps/settings/
+# Pull settings from https://github.com/ansible-collections/.github/blob/master/.github/settings.yml
+_extends: ".github"
diff --git a/ansible_collections/community/okd/.github/stale.yml b/ansible_collections/community/okd/.github/stale.yml
new file mode 100644
index 000000000..230cf78a6
--- /dev/null
+++ b/ansible_collections/community/okd/.github/stale.yml
@@ -0,0 +1,60 @@
+---
+# Configuration for probot-stale - https://github.com/probot/stale
+
+# Number of days of inactivity before an Issue or Pull Request becomes stale
+daysUntilStale: 90
+
+# Number of days of inactivity before an Issue or Pull Request with the stale
+# label is closed. Set to false to disable. If disabled, issues still need to be
+# closed manually, but will remain marked as stale.
+daysUntilClose: 30
+
+# Only issues or pull requests with all of these labels are checked if stale.
+# Defaults to `[]` (disabled)
+onlyLabels: []
+
+# Issues or Pull Requests with these labels will never be considered stale. Set
+# to `[]` to disable
+exemptLabels:
+  - security
+  - planned
+  - priority/critical
+  - lifecycle/frozen
+  - verified
+
+# Set to true to ignore issues in a project (defaults to false)
+exemptProjects: false
+
+# Set to true to ignore issues in a milestone (defaults to false)
+exemptMilestones: true
+
+# Set to true to ignore issues with an assignee (defaults to false)
+exemptAssignees: false
+
+# Label to use when marking as stale
+staleLabel: lifecycle/stale
+
+# Limit the number of actions per hour, from 1-30. Default is 30
+limitPerRun: 30
+
+pulls:
+  markComment: |-
+    PRs go stale after 90 days of inactivity.
+    If there is no further activity, the PR will be closed in another 30 days.
+
+  unmarkComment: >-
+    This pull request is no longer stale.
+
+  closeComment: >-
+    This pull request has been closed due to inactivity.
+
+issues:
+  markComment: |-
+    Issues go stale after 90 days of inactivity.
+    If there is no further activity, the issue will be closed in another 30 days.
+
+  unmarkComment: >-
+    This issue is no longer stale.
+
+  closeComment: >-
+    This issue has been closed due to inactivity.
diff --git a/ansible_collections/community/okd/.github/workflows/changelog.yml b/ansible_collections/community/okd/.github/workflows/changelog.yml
new file mode 100644
index 000000000..569e334fe
--- /dev/null
+++ b/ansible_collections/community/okd/.github/workflows/changelog.yml
@@ -0,0 +1,23 @@
+---
+name: Changelog
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
+on:
+  pull_request:
+    types:
+      - opened
+      - reopened
+      - labeled
+      - unlabeled
+      - synchronize
+    branches:
+      - main
+      - stable-*
+    tags:
+      - '*'
+
+jobs:
+  changelog:
+    uses: ansible-network/github_actions/.github/workflows/changelog.yml@main
diff --git a/ansible_collections/community/okd/.github/workflows/linters.yml b/ansible_collections/community/okd/.github/workflows/linters.yml
new file mode 100644
index 000000000..11258aaf3
--- /dev/null
+++ b/ansible_collections/community/okd/.github/workflows/linters.yml
@@ -0,0 +1,29 @@
+---
+name: Linters
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
+on:
+  pull_request:
+    types:
+      - opened
+      - reopened
+      - labeled
+      - unlabeled
+      - synchronize
+    branches:
+      - main
+      - stable-*
+    tags:
+      - '*'
+jobs:
+  linters:
+    uses: ansible-network/github_actions/.github/workflows/tox-linters.yml@main
+  ansible-lint:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: ansible-network/github_actions/.github/actions/checkout_dependency@main
+
+      - name: Run ansible-lint
+        uses: ansible/ansible-lint@v6.21.0
diff --git a/ansible_collections/community/okd/.github/workflows/sanity-tests.yml b/ansible_collections/community/okd/.github/workflows/sanity-tests.yml
new file mode 100644
index 000000000..49359de9b
--- /dev/null
+++ b/ansible_collections/community/okd/.github/workflows/sanity-tests.yml
@@ -0,0 +1,23 @@
+---
+name: Sanity tests
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
+on:
+  pull_request:
+    types:
+      - opened
+      - reopened
+      - synchronize
+    branches:
+      - main
+      - stable-*
+    tags:
+      - '*'
+
+jobs:
+  sanity:
+    uses: ansible-network/github_actions/.github/workflows/sanity.yml@main
+    with:
+      collection_pre_install: '-r source/tests/sanity/requirements.yml'
diff --git a/ansible_collections/community/okd/.github/workflows/unit-tests.yml b/ansible_collections/community/okd/.github/workflows/unit-tests.yml
new file mode 100644
index 000000000..d3aa3b0ce
--- /dev/null
+++ b/ansible_collections/community/okd/.github/workflows/unit-tests.yml
@@ -0,0 +1,21 @@
+---
+name: Unit tests
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
+on:
+  pull_request:
+    types:
+      - opened
+      - reopened
+      - synchronize
+    branches:
+      - main
+      - stable-*
+    tags:
+      - '*'
+
+jobs:
+  unit-source:
+    uses: ansible-network/github_actions/.github/workflows/unit_source.yml@main
diff --git a/ansible_collections/community/okd/.yamllint b/ansible_collections/community/okd/.yamllint
index 882767605..ac5297cdf 100644
--- a/ansible_collections/community/okd/.yamllint
+++ b/ansible_collections/community/okd/.yamllint
@@ -1,33 +1,15 @@
 ---
-# Based on ansible-lint config
-extends: default
-
 rules:
-  braces:
-    max-spaces-inside: 1
-    level: error
-  brackets:
-    max-spaces-inside: 1
-    level: error
-  colons:
-    max-spaces-after: -1
-    level: error
-  commas:
-    max-spaces-after: -1
-    level: error
-  comments: disable
-  comments-indentation: disable
-  document-start: disable
-  empty-lines:
-    max: 3
-    level: error
-  hyphens:
-    level: error
-  indentation: disable
-  key-duplicates: enable
-  line-length: disable
-  new-line-at-end-of-file: disable
-  new-lines:
-    type: unix
-  trailing-spaces: disable
-  truthy: disable
+  indentation:
+    ignore: &default_ignores |
+      # automatically generated, we can't control it
+      changelogs/changelog.yaml
+      # Will be gone when we release and automatically reformatted
+      changelogs/fragments/*
+  document-start:
+    ignore: *default_ignores
+  line-length:
+    ignore: *default_ignores
+    max: 160
+
+ignore-from-file: .gitignore
diff --git a/ansible_collections/community/okd/CHANGELOG.rst b/ansible_collections/community/okd/CHANGELOG.rst
index b86a7c409..254bf5393 100644
--- a/ansible_collections/community/okd/CHANGELOG.rst
+++ b/ansible_collections/community/okd/CHANGELOG.rst
@@ -5,6 +5,34 @@ OKD Collection Release Notes

 .. contents:: Topics

+v3.0.1
+======
+
+Release Summary
+---------------
+
+This patch release fixes an issue in building the downstream collection.
+
+
+v3.0.0
+======
+
+Release Summary
+---------------
+
+This major release drops support for ansible-core versions lower than 2.14 and Python versions lower than 3.9. It also deprecates the ``openshift`` inventory plugin.
+
+Breaking Changes / Porting Guide
+--------------------------------
+
+- Bump minimum Python supported version to 3.9 (https://github.com/openshift/community.okd/pull/202).
+- Remove support for ansible-core < 2.14 (https://github.com/openshift/community.okd/pull/202).
+
+Deprecated Features
+-------------------
+
+- openshift - the ``openshift`` inventory plugin has been deprecated and will be removed in release 4.0.0 (https://github.com/ansible-collections/kubernetes.core/issues/31).
+
 v2.3.0
 ======
diff --git a/ansible_collections/community/okd/FILES.json b/ansible_collections/community/okd/FILES.json
index f4086aab1..de9bf3950 100644
--- a/ansible_collections/community/okd/FILES.json
+++ b/ansible_collections/community/okd/FILES.json
@@ -8,1067 +8,1151 @@
             "format": 1
         },
         {
-            "name": ".yamllint",
-            "ftype": "file",
-            "chksum_type": "sha256",
-            "chksum_sha256": "5845e18e9f23155f423207df9abac970aed687c638620bc2c9ee06706191054b",
-            "format": 1
-        },
-        {
-            "name": "tests",
+            "name": ".config",
             "ftype": "dir",
             "chksum_type": null,
             "chksum_sha256": null,
             "format": 1
         },
         {
-            "name": "tests/config.yml",
+            "name": ".config/ansible-lint.yml",
             "ftype": "file",
             "chksum_type": "sha256",
-            "chksum_sha256": "9a009a349eaaf78c93ff56072d2ef171937bdb884e4976592ab5aaa9c68e1044",
+            "chksum_sha256": "2bbd9eb3d8f8d6af779720000dc297955b780ab053807e2dfc5a59f24707b530",
             "format": 1
         },
         {
-            "name": "tests/unit",
+            "name": ".github",
             "ftype": "dir",
             "chksum_type": null,
             "chksum_sha256": null,
             "format": 1
         },
         {
-            "name": "tests/unit/requirements.txt",
-            "ftype": "file",
-            "chksum_type": "sha256",
-            "chksum_sha256": "8004a972c3d5c274d8c808e8d8afe03b9aca8af8eebf3df4298f114d8008b754",
-            "format": 1
-        },
-        {
-            "name": "tests/unit/plugins",
+            "name": ".github/workflows",
             "ftype": "dir",
             "chksum_type": null,
             "chksum_sha256": null,
             "format": 1
         },
         {
-            "name": "tests/unit/plugins/module_utils",
-            "ftype": "dir",
-            "chksum_type": null,
-            "chksum_sha256": null,
+            "name": ".github/workflows/changelog.yml",
+            "ftype": "file",
+            "chksum_type": "sha256",
+            "chksum_sha256": "859dc36574b2741d827d47c03bdc725646c486e8c3c7c975f7bb2e29c750ca41",
             "format": 1
         },
         {
-            "name": "tests/unit/plugins/module_utils/test_openshift_docker_image.py",
+            "name": ".github/workflows/linters.yml",
             "ftype": "file",
             "chksum_type": "sha256",
-            "chksum_sha256": "2645a4d02f3adc9e4bbb8b69ecb9b327836c142d1115f616c2e0eb05f0414299",
+
"chksum_sha256": "4e72e9b4d746793d9b2766bac960c2535db37061613792c0559113415e7ac196", "format": 1 }, { - "name": "tests/unit/plugins/module_utils/test_ldap_dn.py", + "name": ".github/workflows/sanity-tests.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "5e692237cc900dce4c6bd3a1512c4027e6bb2c0a23e5d473a55c9137751c3559", + "chksum_sha256": "e1c58db0639ceee1498911aa4c265166f1ad510a09567bc9a06fe7bd0b210619", "format": 1 }, { - "name": "tests/unit/plugins/module_utils/test_ldap_sync_config.py", + "name": ".github/workflows/unit-tests.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d1647944d9370d05aedc63b115f427d037a3354f670b45e94bb3f4f4f95b162f", + "chksum_sha256": "a774d39eadf33235ba9490d9bfa69be9659533c18be73e75212fa11084bad4d2", "format": 1 }, { - "name": "tests/unit/plugins/modules", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": ".github/patchback.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ed69f87ea46171cb574fb77dc74fdbd7a269d4cad8d5ba6494d64d99842ef8e4", "format": 1 }, { - "name": "tests/unit/plugins/modules/test_openshift_adm_migrate_template_instances.py", + "name": ".github/settings.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "4c426c87d28313524cbd378f917a2d70242f333571f942d44bc9bf8c7e591586", + "chksum_sha256": "bec830af2dc0266fc961c9c47bff160476c32878720cd1d7902581ec30a15c18", "format": 1 }, { - "name": "tests/sanity", + "name": ".github/stale.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "923b49f6fb8b325ea890d05a42537b3f9c5aaf26b64a704c0fef4b696aa6a4bb", + "format": 1 + }, + { + "name": "changelogs", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/sanity/ignore-2.14.txt", + "name": "changelogs/changelog.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "43bc048d24e8dd8fca8052b6135daaf59207e462f72cb9848f65de9c6e09552c", + "chksum_sha256": "dc1cb3339cbc9332acb6d7e964b9deaddaf6b371bd954d50376edb07389be62d", "format": 1 }, { - "name": "tests/sanity/ignore-2.10.txt", + "name": "changelogs/config.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ba2285dad70183ce56e9d43ed97d4b7a1b12e7cdd3f4e5856fb3cf380148a5ee", + "chksum_sha256": "77ecb61675531e89ac5c09471a86ca6fe440167361fb6d42e30b8208c2514809", "format": 1 }, { - "name": "tests/sanity/ignore-2.13.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "43bc048d24e8dd8fca8052b6135daaf59207e462f72cb9848f65de9c6e09552c", + "name": "ci", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/sanity/ignore-2.9.txt", + "name": "ci/Dockerfile", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "cf94385a1d8a2b51f40f88879b59922c8a8b07b8f1e0ac511a2454e257361995", + "chksum_sha256": "10c6a10838ba7e78ec6b0782d45217311b7faf7c80d466c9e23a52ac9447efa0", "format": 1 }, { - "name": "tests/sanity/ignore-2.12.txt", + "name": "ci/doc_fragment_modules.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "43bc048d24e8dd8fca8052b6135daaf59207e462f72cb9848f65de9c6e09552c", + "chksum_sha256": "e5f5e24396a2cc86faedf6d0b7a3835db06bbe6fc7b534018ed85ebecfa39e09", "format": 1 }, { - "name": "tests/sanity/ignore-2.11.txt", + "name": "ci/downstream.sh", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "dc0294c142585d8904331fd57755efc876e592ce8242e42e1159bcfddd4adbba", + "chksum_sha256": 
"acc4c04397c247ae716e133f9df25c8a301b606416425a6fdbf191e096ab76b0", "format": 1 }, { - "name": "tests/sanity/ignore-2.15.txt", + "name": "ci/downstream_fragments.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "43bc048d24e8dd8fca8052b6135daaf59207e462f72cb9848f65de9c6e09552c", + "chksum_sha256": "9815e2aa84aef5d396fd07ddb371fa87362ade6fbe72d829e4d5983abe8af453", "format": 1 }, { - "name": "README.md", + "name": "ci/incluster_integration.sh", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a49ac5f1fa3cdb032a5274e9d0d8a8fadcc5dd3ee1a0d3f872ca969a22be3056", + "chksum_sha256": "012ffb20a006094a4e8a00ead7947a196c477c4eb071a1dad5ffe42f509935e1", "format": 1 }, { - "name": "changelogs", + "name": "docs", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "changelogs/changelog.yaml", + "name": "docs/ansible_turbo_mode.rst", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "36a4108575c00ad25e2774bcae35c32e4022d8d1b563ed72e536ec796b22b46f", + "chksum_sha256": "a9bcd000c20de1d1ad35f5516f4fdffce07c8e28a5ebee572629a3b9cb867152", "format": 1 }, { - "name": "changelogs/config.yaml", + "name": "docs/community.okd.k8s_module.rst", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "46bf50de40f3abb6bc0497f3b188dba66c295a2c764337abf4dda636644cb0d9", + "chksum_sha256": "07204d20c2e6d6c65841abed30d0801ddb26112e74ca544f364781feda2c5008", "format": 1 }, { - "name": "changelogs/.plugin-cache.yaml", + "name": "docs/community.okd.oc_connection.rst", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e9fb32d9dfa89439d04e50697ea1e8a5c77a2411dd6a061d2eddb5e328753abf", + "chksum_sha256": "92709a3843099a4df82f3fb7978b6dc45c9a607387e490dbeaa37d68e2969b13", "format": 1 }, { - "name": "CONTRIBUTING.md", + "name": "docs/community.okd.openshift_adm_groups_sync_module.rst", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "4156016c23606d288b98c4fa0eafc6afe1cf28e695fb5a267bcc3d337a7bfef0", + "chksum_sha256": "11db9878e4227634e58b1512dee0b3893191edec658adb65691756c16f5174e8", "format": 1 }, { - "name": "requirements.txt", + "name": "docs/community.okd.openshift_adm_migrate_template_instances_module.rst", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d20fa49248fb968b70381346d2b2f6195e19dc1ddded06856bc4aefa6a25d431", + "chksum_sha256": "c91df00cdec670c92d452cc7d10abaebd705756b7bb643c767c2309a22bd6859", "format": 1 }, { - "name": "meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "docs/community.okd.openshift_adm_prune_auth_module.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f16dbc021ee73369585efa88fd7d56bf02898317e8602f303eab9d6601d4ddf0", "format": 1 }, { - "name": "meta/runtime.yml", + "name": "docs/community.okd.openshift_adm_prune_builds_module.rst", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "369f3c5a13d3d8400d0f662ec26d211c4e135f1b8b661c66346d899ad371b534", + "chksum_sha256": "b0662928fe5aff419369743708efe29cb0e951c19e41ab7bb26dc23969cf5d1d", "format": 1 }, { - "name": ".gitignore", + "name": "docs/community.okd.openshift_adm_prune_deployments_module.rst", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8344abb7aafbdb654f0c88532af7ab557dad7a6a7cba60f4834f33c2a1f8524f", + "chksum_sha256": "7e4781b3a321298041396560a8764bcbfb8ddbd3c0bac20a54fb5cb046e3c4d8", "format": 1 }, { - "name": "test-requirements.txt", + "name": "docs/community.okd.openshift_adm_prune_images_module.rst", "ftype": "file", "chksum_type": 
"sha256", - "chksum_sha256": "34db46c861b289148eb3ebff5ff7786aa091eec38c0f3d1606bf2e3d9f030b94", + "chksum_sha256": "d273a042aaabf92298b5a38f887e0593047472314afe36f831f9a9aa3989ced5", "format": 1 }, { - "name": "codecov.yml", + "name": "docs/community.okd.openshift_auth_module.rst", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "caa848a2e02be5014890c5cbc7727e9a00d40394637c90886eb813d60f82c9c3", + "chksum_sha256": "d7e0110a88d5e2d1033f803cfef7ab821eef3fd2aeba9ab3d16203f336f39394", "format": 1 }, { - "name": "setup.cfg", + "name": "docs/community.okd.openshift_build_module.rst", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "125fefc5c9470b8e72cffc06937c30e2bc073f4ca6c1a593f131a6e1fd76edf2", + "chksum_sha256": "50c1219f5524d60a4b46bc58c6df30d8af7b6e6ecc20fd4d15cf3e0fc352c05e", "format": 1 }, { - "name": "OWNERS", + "name": "docs/community.okd.openshift_import_image_module.rst", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "771ebefb6c2b0154bf043cc537cc55b8421ddd4058248b00a62cdc9b190131da", + "chksum_sha256": "889c62212325e583f2a36fb0dd66f392d6bd4058af089c1f829f6f97972577d1", "format": 1 }, { - "name": "LICENSE", + "name": "docs/community.okd.openshift_inventory.rst", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8b1ba204bb69a0ade2bfcf65ef294a920f6bb361b317dba43c7ef29d96332b9b", + "chksum_sha256": "e5df6651617abc50c0b054c34e57601bc7ea889167800658b1013d61beb236e2", "format": 1 }, { - "name": "OWNERS_ALIASES", + "name": "docs/community.okd.openshift_process_module.rst", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "fee153ef8daa4ed6170434daf598b3efdcaf80db39e5b4ef9f2ab8cd4f8f4c30", + "chksum_sha256": "ca7f93a6fa0ac3d38f982076ef491438338bb7face86bede1799de9bdb66175a", "format": 1 }, { - "name": "plugins", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "docs/community.okd.openshift_registry_info_module.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e980ae3c9f683a90b9cfbb79aa36cb345a262a16fd4d2079e0147e9124d376e3", "format": 1 }, { - "name": "plugins/doc_fragments", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "docs/community.okd.openshift_route_module.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e23a6df2370bc8bebc1e77dd70ac239d15ade8cc1eedb0c3201692d2218f20d0", "format": 1 }, { - "name": "plugins/module_utils", + "name": "meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "plugins/module_utils/openshift_common.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cbbbbe8693629763781768801033674d80fdcc5ae2b5d19eb35922bc50eee8ed", - "format": 1 - }, - { - "name": "plugins/module_utils/openshift_registry.py", + "name": "meta/runtime.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a2a40dbcae3074de050c5d9be2d1e4352cf0698ad8385fd81c03ce73805a6e93", + "chksum_sha256": "cc700e426626ae08f404ae2fe82224e9dd7e98a7e1a3ab91823fd7cf3930feac", "format": 1 }, { - "name": "plugins/module_utils/openshift_adm_prune_deployments.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8fa9b77a5d2326893c1ecd9841d4b9e30e3e7bd788d7455efcbad45d010683f5", + "name": "molecule", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "plugins/module_utils/openshift_images_common.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"d61fbb7ced206c7cbde4a610e643aa4a824e7856648ba598c5f630978bbafdaa", + "name": "molecule/default", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "plugins/module_utils/openshift_process.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a7f5b99e0f07aba90fd2744f6311c82a59698424e5904ceda7b2e0eaa3e080af", + "name": "molecule/default/files", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "plugins/module_utils/openshift_builds.py", + "name": "molecule/default/files/crd-resource.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f7b18ce9225cb1fb06a970a591662ab9cd1c3015f18bae86570e4f2345b0449e", + "chksum_sha256": "4921362ac3c4afac5f42ebb90b37bcb75e1fe20929bb0e45d0df4c190d28f577", "format": 1 }, { - "name": "plugins/module_utils/openshift_ldap.py", + "name": "molecule/default/files/example.env", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "894c0aa5a40d6c227b73cb83c3e4217a6ed081cda9c3a89a0606b66a65b52287", + "chksum_sha256": "3701367ea0a79158d476e168c1b1e64639f703da926e63045cba5cfdbb6c3576", "format": 1 }, { - "name": "plugins/module_utils/openshift_import_image.py", + "name": "molecule/default/files/kuard-extra-property.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "47819d3ab99b61593b5db881b6d93855caf45ae3df71b77ad7b6528a1b51bbbd", + "chksum_sha256": "27a06de7ca760698dc1d3c46ef12e40e214ae319a791a3965806b30adca06de0", "format": 1 }, { - "name": "plugins/module_utils/k8s.py", + "name": "molecule/default/files/kuard-invalid-type.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "386788c253525ffe4350e4b6fbbc1380e03f703bfe6fa3ca4506615e93b9173d", + "chksum_sha256": "c0e8aa083eecb5c4d92108af83f9a2c931a2e5bbb766af4792adc4fb9fd9d32d", "format": 1 }, { - "name": "plugins/module_utils/openshift_adm_prune_images.py", + "name": "molecule/default/files/nginx.env", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "cd027fb0716b5c92ac6176f6a46236c0379e9680cd9d743c5fd40617461502f8", + "chksum_sha256": "a68c2914576119a809943116ce0050ca221c5b8f3a6722fa6abbf15dababde5f", "format": 1 }, { - "name": "plugins/module_utils/openshift_adm_prune_auth.py", + "name": "molecule/default/files/pod-template.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "76a26d3ab75396a28fc546c19b1a8e215aa8a5fd59fc4491539e2ef6d5537d8c", + "chksum_sha256": "255bfc8684edf514bac8d4da1ca2c53c26ef3cf190d68e9cbdf7423254e9407e", "format": 1 }, { - "name": "plugins/module_utils/openshift_docker_image.py", + "name": "molecule/default/files/setup-crd.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d23dfbad39579c35a2a39bbf0e18e2e0e8bc3a0122c37d0fc87c2ea125f5af2c", + "chksum_sha256": "509878fff22a19715f1c491930eefd23430c0f571716b463c3ab9a754d0fb250", "format": 1 }, { - "name": "plugins/module_utils/openshift_groups.py", + "name": "molecule/default/files/simple-template.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f583086177b63a87fca75c3d41d1bce4bc4cecaf68c60a9008ca1ecb31d1c38a", + "chksum_sha256": "4df5821a40d53323c398a786ffa81cbdb22aa759f54ffdf98d9d3f4e6f119604", "format": 1 }, { - "name": "plugins/connection", + "name": "molecule/default/roles", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "plugins/connection/oc.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cb192532153b1625ec97437f5ac7177cfa78d8cba5c5eaa4633fab63ba54ca09", + "name": 
"molecule/default/roles/openshift_adm_groups", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "plugins/inventory", + "name": "molecule/default/roles/openshift_adm_groups/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "plugins/inventory/openshift.py", + "name": "molecule/default/roles/openshift_adm_groups/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "bb798a479803548cc8eeddb6fdb565d1b0f7b3222af445c42447247203953849", + "chksum_sha256": "1bd380f6c0cdf0c2b25e6bf27e4304566f6b9efbbeaf15da3847a9744d685a72", "format": 1 }, { - "name": "plugins/modules", + "name": "molecule/default/roles/openshift_adm_groups/library", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "plugins/modules/openshift_adm_groups_sync.py", + "name": "molecule/default/roles/openshift_adm_groups/library/openshift_ldap_entry.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "80197c296c50931b3b0ed2a4841dd46ab98b6d0de3c923ddc8d60abd49cdecee", + "chksum_sha256": "8e0d7d997ba74938fef19fead2cf1d2151644ad999e2285b0d769c89804abf5e", "format": 1 }, { - "name": "plugins/modules/openshift_registry_info.py", + "name": "molecule/default/roles/openshift_adm_groups/library/openshift_ldap_entry_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "abf945aba0bb5d769783c7e3ee1b19c2e4cd4f9e932a51e88dcb298a664cc677", + "chksum_sha256": "ccbf989315dfac69f1a62fb08067f4359e5b2a13d33baa9fba4ccbcbb8ad7f47", "format": 1 }, { - "name": "plugins/modules/openshift_auth.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a293bf87c5f13c610fd2e6032a76e2c813bfdae4953c7b33e2c6cbf3bf8249e1", + "name": "molecule/default/roles/openshift_adm_groups/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "plugins/modules/openshift_route.py", + "name": "molecule/default/roles/openshift_adm_groups/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "78e6b48f0dcc76cecc8c0272768cfdeab62364b3658984fdddf888492f61dd03", + "chksum_sha256": "8d57e1f764ed7af7cf34d0b039ef97eb206fd846d31025ac08ddefa855a83204", "format": 1 }, { - "name": "plugins/modules/openshift_build.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a0aff179625071a085d0d9014cfa8999dceb96ba2394db5f33991eca1a9c85c5", + "name": "molecule/default/roles/openshift_adm_groups/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "plugins/modules/openshift_adm_prune_deployments.py", + "name": "molecule/default/roles/openshift_adm_groups/tasks/activeDirectory.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9dc9dcfce2107deecd3da2144f0dee455a442afcd04428c042bd48d598978bd2", + "chksum_sha256": "522791250e95623a0750d13e335db874f3afc888dd513b234d4de0553ba0bcf8", "format": 1 }, { - "name": "plugins/modules/openshift_process.py", + "name": "molecule/default/roles/openshift_adm_groups/tasks/augmentedActiveDirectory.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "271370fe6c7eba007bd282d4bea738100278b4294df88d55b69f951309867a4e", + "chksum_sha256": "9685339b4984a7d72f2fcf56e2f444298f789539243205d70288a9211c52d9f9", "format": 1 }, { - "name": "plugins/modules/openshift_import_image.py", + "name": "molecule/default/roles/openshift_adm_groups/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": 
"dc18021e7be860b0accbe89fc51e2de1ca03938bb159907ba45558527f808c2f", + "chksum_sha256": "dbf4a75120762ad728524e355345807111fb02dd4be9496ee5ecd00b5a7a9fc2", "format": 1 }, { - "name": "plugins/modules/k8s.py", + "name": "molecule/default/roles/openshift_adm_groups/tasks/python-ldap-not-installed.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ffca7a5e4ed3d2122106a601b511589507b99ab65a138e4c912dcd76fcb2d430", + "chksum_sha256": "243678c2bbdca70ab4064027e32ad81c1e50dc59e6af595a9fdf29c789487322", "format": 1 }, { - "name": "plugins/modules/openshift_adm_prune_images.py", + "name": "molecule/default/roles/openshift_adm_groups/tasks/rfc2307.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c1f6d6e041d08ccd548a812c6bee4a972b260ebb86dbe35783140001c9bea681", + "chksum_sha256": "1bed08f558010eac88ba66c291c828866843784939e12d4fbc0cb55593bb2e90", "format": 1 }, { - "name": "plugins/modules/openshift_adm_prune_auth.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "63c667799f92fdf025b577e2a6a3c84220389e35d357d95177f7acdb773d3da2", + "name": "molecule/default/roles/openshift_adm_groups/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "plugins/modules/openshift_adm_prune_builds.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d40236b25580fa9fb242d42b43304865f51fceef82d6e1eb0ca158cf18a6253e", + "name": "molecule/default/roles/openshift_adm_groups/templates/ad", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "plugins/modules/openshift_adm_migrate_template_instances.py", + "name": "molecule/default/roles/openshift_adm_groups/templates/ad/definition.j2", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0f6f0fd8d617700bdb22953555bb6fe2dd7dbe9c297264cf191cfca651dbb0b4", + "chksum_sha256": "f59a7ee4f37de0040f293b615b45092f5bc9fb36f832d81ea6f0a7155bb2c1bd", "format": 1 }, { - "name": "Makefile", + "name": "molecule/default/roles/openshift_adm_groups/templates/ad/sync-config.j2", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "afa91ff62ed42d4b0fb73fa47899a01cde478809866d99c63787130074d5a039", - "format": 1 - }, - { - "name": "molecule", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "chksum_sha256": "fef496b8ba026e34cdef374f373a15948db2bf6147bff5eb4ebdba4d1b3a4381", "format": 1 }, { - "name": "molecule/default", + "name": "molecule/default/roles/openshift_adm_groups/templates/augmented-ad", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "molecule/default/prepare.yml", + "name": "molecule/default/roles/openshift_adm_groups/templates/augmented-ad/definition.j2", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d9765b6cd95ab121281ca53b24f5a0954b211801ca070aa9402f746e593195df", + "chksum_sha256": "a2db659cc8d7467e4d9e78029027d6fff6cc1fd38e54c6ca954a9a64633470d0", "format": 1 }, { - "name": "molecule/default/README.md", + "name": "molecule/default/roles/openshift_adm_groups/templates/augmented-ad/sync-config.j2", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "75d6935060001109bea00ffcf6d289b29e7aa6afeaba2e1e1b85d767062f4da8", + "chksum_sha256": "76c20cd498ced5c2417a71611b35bde1b22b537fd106b5d8fecf1b896d7d5074", "format": 1 }, { - "name": "molecule/default/tasks", + "name": "molecule/default/roles/openshift_adm_groups/templates/rfc2307", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": 
"molecule/default/tasks/openshift_import_images.yml", + "name": "molecule/default/roles/openshift_adm_groups/templates/rfc2307/definition.j2", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0697c9bcd3f8b67cc778ddd5a4eaedf170f925501dae4e5836434b897a30d5dd", + "chksum_sha256": "5e2d7b8287dd1b1ad0067eb8a38d282ff43f95e3a91c3778b86205a14988b434", "format": 1 }, { - "name": "molecule/default/tasks/validate_installed.yml", + "name": "molecule/default/roles/openshift_adm_groups/templates/rfc2307/sync-config.j2", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a6abad4a4d33a77ca736a4835db0f5d11dc7742100ff4d9d36a9942e1f4b55e9", + "chksum_sha256": "64cdf5017cb257f3b26d7eb858c8295313684b1147f0baee3f657fc3dc36c1f9", "format": 1 }, { - "name": "molecule/default/tasks/openshift_adm_prune_deployments.yml", + "name": "molecule/default/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "molecule/default/tasks/openshift_adm_prune_auth_clusterroles.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "16b97fda417204c8df08a294c4f46011ad6bc617964d3b2a8c860134793ea175", + "chksum_sha256": "cf0ce2145e226c480622e9166a3f3792948f8ab671dfc71b49fd8a334ce7db36", "format": 1 }, { - "name": "molecule/default/tasks/openshift_builds.yml", + "name": "molecule/default/tasks/openshift_adm_prune_auth_roles.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "dd3532196ebb99d4a467fb2cc4b958a1d5e551969c00224b07694c76b959dd45", + "chksum_sha256": "503cbc3b57838463568fc4d92a5cdf273eefe94d3585d8ca8c095385a68fc7e6", "format": 1 }, { - "name": "molecule/default/tasks/openshift_adm_prune_auth_roles.yml", + "name": "molecule/default/tasks/openshift_adm_prune_deployments.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d83689cc8e59c041386545fb0afdde5ec3157bea33db35303529595ddffb5fe9", + "chksum_sha256": "9f279837a7539985a4663f0522186d704675c4c55fc8145f1c4e7c6905fe988c", "format": 1 }, { - "name": "molecule/default/tasks/validate_not_installed.yml", + "name": "molecule/default/tasks/openshift_auth.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9d493e30e52e26c73dd2d197e74b4d27776d01f27fb5cfd6ef982db157484cbb", + "chksum_sha256": "0d8b02f57bd1ff3b9a65d46c40914a2ff765f6bd96901ebfa7ce1751154d1802", "format": 1 }, { - "name": "molecule/default/tasks/openshift_prune_images.yml", + "name": "molecule/default/tasks/openshift_builds.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "706dec431bd424212a1b43e7c7aa86bc9a01c3fd392d170f6486b1732ebcc317", + "chksum_sha256": "f9e00bcbc3d7758ccc307207c0251d968e028cacca64bade4b73675fa6969004", "format": 1 }, { - "name": "molecule/default/tasks/openshift_auth.yml", + "name": "molecule/default/tasks/openshift_import_images.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0d8b02f57bd1ff3b9a65d46c40914a2ff765f6bd96901ebfa7ce1751154d1802", + "chksum_sha256": "5c295e6733b977323a9113220137891428e47582ebca88a3f2186b8d5f55a905", "format": 1 }, { - "name": "molecule/default/tasks/openshift_route.yml", + "name": "molecule/default/tasks/openshift_process.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "20d0f4944902de207884fe0a0a987070b9b3716a436d76264400cb67f53c38c3", + "chksum_sha256": "999932ada001df7be263ab2158592c6325d2827ee372ad38e9f0197b807cbf52", "format": 1 }, { - "name": "molecule/default/tasks/openshift_adm_prune_auth_clusterroles.yml", + "name": "molecule/default/tasks/openshift_prune_images.yml", 
"ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "bc631199609c00aa4f669ccde1c351921ad43eec335c7abe6185f97c2b903f11", + "chksum_sha256": "7b2b85f65cf92d5a572f7f85cc006d5fd7b6ebc2d913f8ae9f16d2def362d071", "format": 1 }, { - "name": "molecule/default/tasks/openshift_process.yml", + "name": "molecule/default/tasks/openshift_route.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "999932ada001df7be263ab2158592c6325d2827ee372ad38e9f0197b807cbf52", + "chksum_sha256": "3d4fc8dd62b012f339a517d0e7c73767031bda416b4bfac1975c5425ee462c8b", "format": 1 }, { - "name": "molecule/default/destroy.yml", + "name": "molecule/default/tasks/validate_installed.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "26670c28263a4f354e646c222e2c59ccfcb3cd807fe07d531e9874fc3748a44c", + "chksum_sha256": "a6abad4a4d33a77ca736a4835db0f5d11dc7742100ff4d9d36a9942e1f4b55e9", "format": 1 }, { - "name": "molecule/default/roles", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "molecule/default/tasks/validate_not_installed.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9d493e30e52e26c73dd2d197e74b4d27776d01f27fb5cfd6ef982db157484cbb", "format": 1 }, { - "name": "molecule/default/roles/openshift_adm_groups", + "name": "molecule/default/vars", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "molecule/default/roles/openshift_adm_groups/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "molecule/default/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d065ea53707a373ac5928ee6091224569ac791023daa1cf26f70a099812a51cf", "format": 1 }, { - "name": "molecule/default/roles/openshift_adm_groups/tasks/python-ldap-not-installed.yml", + "name": "molecule/default/README.md", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "05a6759b51bcd97f154db6d344367c55eb424533163fccce1493c49dd5913973", + "chksum_sha256": "75d6935060001109bea00ffcf6d289b29e7aa6afeaba2e1e1b85d767062f4da8", "format": 1 }, { - "name": "molecule/default/roles/openshift_adm_groups/tasks/main.yml", + "name": "molecule/default/converge.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d7b676aa4dd79463ad48beebb3b29dd7d73f65bad7df406ea8787214a9fa7004", + "chksum_sha256": "2da549e774063f37985fa720bb55ce6a5d70375d1ac2171564c29b63794342bf", "format": 1 }, { - "name": "molecule/default/roles/openshift_adm_groups/tasks/augmentedActiveDirectory.yml", + "name": "molecule/default/destroy.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ad25bcfb4431257ab481f12cfcbae23620d8826fa25d38e93ed9a81ef646aa68", + "chksum_sha256": "26670c28263a4f354e646c222e2c59ccfcb3cd807fe07d531e9874fc3748a44c", "format": 1 }, { - "name": "molecule/default/roles/openshift_adm_groups/tasks/activeDirectory.yml", + "name": "molecule/default/molecule.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "5e2758c9d39c94892ca9976f8e5d7b360bfccb134ea7b95f83ba51c46b1d9195", + "chksum_sha256": "53e1ed3199bf98922cbabb2d30dc2b4df6594790b0dee4cb271aaeba739bdf72", "format": 1 }, { - "name": "molecule/default/roles/openshift_adm_groups/tasks/rfc2307.yml", + "name": "molecule/default/prepare.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "87966533bdb6f1f62208f8c6e85ba38acdb3dfd44c6312eb5dca5026b4c9ea0e", + "chksum_sha256": "e346146e7c1bf70e7255c26241de861c5df4cc1fcc00d3f66c10aa14ce883d0d", "format": 1 }, { - "name": 
"molecule/default/roles/openshift_adm_groups/library", + "name": "molecule/default/verify.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0aa66e5d35f985e47345d47e378816189d77cb131520e115f05c8eea2ab3bfc6", + "format": 1 + }, + { + "name": "plugins", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "molecule/default/roles/openshift_adm_groups/library/openshift_ldap_entry.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8e0d7d997ba74938fef19fead2cf1d2151644ad999e2285b0d769c89804abf5e", + "name": "plugins/connection", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "molecule/default/roles/openshift_adm_groups/library/openshift_ldap_entry_info.py", + "name": "plugins/connection/oc.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0b98d02bb07c0ae7ba4da80ef95c02d2476a0106bd8f198779d296c81323bbfe", + "chksum_sha256": "1fcf101432828f7519c411ec7b1be81f42a366759cc07935cc34ad1b85edac6f", "format": 1 }, { - "name": "molecule/default/roles/openshift_adm_groups/meta", + "name": "plugins/inventory", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "molecule/default/roles/openshift_adm_groups/meta/main.yml", + "name": "plugins/inventory/openshift.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8d57e1f764ed7af7cf34d0b039ef97eb206fd846d31025ac08ddefa855a83204", + "chksum_sha256": "c1c98feecbde5e6ab84e1040351234c482e34dcaf95dd6e9ce1ca41980a97acc", "format": 1 }, { - "name": "molecule/default/roles/openshift_adm_groups/templates", + "name": "plugins/module_utils", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "molecule/default/roles/openshift_adm_groups/templates/ad", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "plugins/module_utils/k8s.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c8bd794e43782ceb9bf24c552584bf6b22817c273503838e38ba3acbf41f7e9b", "format": 1 }, { - "name": "molecule/default/roles/openshift_adm_groups/templates/ad/definition.j2", + "name": "plugins/module_utils/openshift_adm_prune_auth.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f59a7ee4f37de0040f293b615b45092f5bc9fb36f832d81ea6f0a7155bb2c1bd", + "chksum_sha256": "40e03e4fda07b0995fb4ead3937424746f4ad2e0302cf1be6b1709244534f4fd", "format": 1 }, { - "name": "molecule/default/roles/openshift_adm_groups/templates/ad/sync-config.j2", + "name": "plugins/module_utils/openshift_adm_prune_deployments.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "fef496b8ba026e34cdef374f373a15948db2bf6147bff5eb4ebdba4d1b3a4381", + "chksum_sha256": "e688fef20fcca0454f5d66be41959ef2caae94f655ea6b6e232df9153321be89", "format": 1 }, { - "name": "molecule/default/roles/openshift_adm_groups/templates/augmented-ad", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "plugins/module_utils/openshift_adm_prune_images.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2e80eb099994a60a38ccb2d3d47963597999c11215cbd1746a558101592369ee", "format": 1 }, { - "name": "molecule/default/roles/openshift_adm_groups/templates/augmented-ad/definition.j2", + "name": "plugins/module_utils/openshift_builds.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a2db659cc8d7467e4d9e78029027d6fff6cc1fd38e54c6ca954a9a64633470d0", + "chksum_sha256": 
"498738d81ec6249b0f8309a8aaa215a918bf9646c2e5f15c243395626d8f64dd", "format": 1 }, { - "name": "molecule/default/roles/openshift_adm_groups/templates/augmented-ad/sync-config.j2", + "name": "plugins/module_utils/openshift_common.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "76c20cd498ced5c2417a71611b35bde1b22b537fd106b5d8fecf1b896d7d5074", + "chksum_sha256": "6af4aa60cd80d06fc7f42af7ecf526b9c53a49173b7f896a7236a3b521c4ade9", "format": 1 }, { - "name": "molecule/default/roles/openshift_adm_groups/templates/rfc2307", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "plugins/module_utils/openshift_docker_image.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "92a9a5b72dc08177f3f3ea78773da262371df7ae9629a25c8b3ad0c199f074d3", "format": 1 }, { - "name": "molecule/default/roles/openshift_adm_groups/templates/rfc2307/definition.j2", + "name": "plugins/module_utils/openshift_groups.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "5e2d7b8287dd1b1ad0067eb8a38d282ff43f95e3a91c3778b86205a14988b434", + "chksum_sha256": "28aab970ca56b3e6ce985dde4db867073756edb7504ee56b0df6859fde321e9f", "format": 1 }, { - "name": "molecule/default/roles/openshift_adm_groups/templates/rfc2307/sync-config.j2", + "name": "plugins/module_utils/openshift_images_common.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "64cdf5017cb257f3b26d7eb858c8295313684b1147f0baee3f657fc3dc36c1f9", + "chksum_sha256": "c8f211ba01c590ac57e4df775c0a58699c0ae25c631b635566ae2ff9aabe4b55", "format": 1 }, { - "name": "molecule/default/roles/openshift_adm_groups/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "plugins/module_utils/openshift_import_image.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7cf4e412c985be34ed4cd9e7510db5ab7364508c298458006e148809fd407e82", "format": 1 }, { - "name": "molecule/default/roles/openshift_adm_groups/defaults/main.yml", + "name": "plugins/module_utils/openshift_ldap.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1bd380f6c0cdf0c2b25e6bf27e4304566f6b9efbbeaf15da3847a9744d685a72", + "chksum_sha256": "3f60ab29cc0f32f3e895e170d9f9288e2b084deef377f9969c342ce51ebc58f8", "format": 1 }, { - "name": "molecule/default/molecule.yml", + "name": "plugins/module_utils/openshift_process.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0e6bfb7aa0d20362717eebbcf8c8bee26211fc1d83bf23bdd5f8fb7046155c47", + "chksum_sha256": "004b12313ab35b10521146d1c0bd05a5838f1f25a33d2d465f5bb4aa9dc1d9f6", "format": 1 }, { - "name": "molecule/default/converge.yml", + "name": "plugins/module_utils/openshift_registry.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "bdd1f1cc563c232b34226f0316eb78d229ca562028e37e281ad76261131da044", + "chksum_sha256": "97235ea3a40a714e532d85b636fb81ea2eb8afdfbdea7da33a387408c692a272", "format": 1 }, { - "name": "molecule/default/files", + "name": "plugins/modules", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "molecule/default/files/nginx.env", + "name": "plugins/modules/k8s.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a68c2914576119a809943116ce0050ca221c5b8f3a6722fa6abbf15dababde5f", + "chksum_sha256": "56f8482a9638fa5d9b223fafa6d6b62f805bdf7cfe65c96e257631b065ee0e3e", "format": 1 }, { - "name": "molecule/default/files/setup-crd.yml", + "name": "plugins/modules/openshift_adm_groups_sync.py", "ftype": "file", "chksum_type": "sha256", - 
"chksum_sha256": "509878fff22a19715f1c491930eefd23430c0f571716b463c3ab9a754d0fb250", + "chksum_sha256": "1285e0be18423cd12a1a5ee715a6b6e2a9c6ee84d265a256dc041377ec86831a", "format": 1 }, { - "name": "molecule/default/files/pod-template.yaml", + "name": "plugins/modules/openshift_adm_migrate_template_instances.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "5fcb22787bccf2a5e1b6b90617f1aabadb71e9c9a906763e68b8573ef468f09a", + "chksum_sha256": "1db241bd4b22890659fc73ef8de575b3f2d61f6819920e67dacea664d58e3620", "format": 1 }, { - "name": "molecule/default/files/simple-template.yaml", + "name": "plugins/modules/openshift_adm_prune_auth.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "acbc61a1278772f1fd80a1bdb6546b6022842f7fd1c1b9aaf8bd65b606d93a2b", + "chksum_sha256": "2c7fd42950f839f827503c5de4a8111d235629d200ce13833f39f3193d854a8c", "format": 1 }, { - "name": "molecule/default/files/kuard-invalid-type.yml", + "name": "plugins/modules/openshift_adm_prune_builds.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c0e8aa083eecb5c4d92108af83f9a2c931a2e5bbb766af4792adc4fb9fd9d32d", + "chksum_sha256": "d372ad2d48b33ca7210e4f642597fd31ccdca1eb310a949d3af9a6636328a68e", "format": 1 }, { - "name": "molecule/default/files/example.env", + "name": "plugins/modules/openshift_adm_prune_deployments.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3701367ea0a79158d476e168c1b1e64639f703da926e63045cba5cfdbb6c3576", + "chksum_sha256": "5482e4282ce36f550b06f9184086f4e6a75ea5e5a216104b4fded399bf548267", "format": 1 }, { - "name": "molecule/default/files/kuard-extra-property.yml", + "name": "plugins/modules/openshift_adm_prune_images.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "27a06de7ca760698dc1d3c46ef12e40e214ae319a791a3965806b30adca06de0", + "chksum_sha256": "31f370fe2e70ea88a7cbd517285965a43b9363107a5b66956f2542b0f1412fa0", "format": 1 }, { - "name": "molecule/default/files/crd-resource.yml", + "name": "plugins/modules/openshift_auth.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "4921362ac3c4afac5f42ebb90b37bcb75e1fe20929bb0e45d0df4c190d28f577", + "chksum_sha256": "9237d7254c4773ff8f1cbb568af501ad0f5c6b00a089b495c67a131f201b017a", "format": 1 }, { - "name": "molecule/default/verify.yml", + "name": "plugins/modules/openshift_build.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0aa66e5d35f985e47345d47e378816189d77cb131520e115f05c8eea2ab3bfc6", + "chksum_sha256": "90aa16f9cbb70213bbfaea856f06a745627f46c4bdfe4bed25676bcb3beb4ba2", "format": 1 }, { - "name": "molecule/default/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "plugins/modules/openshift_import_image.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1116270865430b4d5aac0d8ea4041a76f940554b4f1b42bff2508d1e503a6b26", "format": 1 }, { - "name": "molecule/default/vars/main.yml", + "name": "plugins/modules/openshift_process.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "75f11fae6d7f1e6574152509a4d7ad306966fc94eda2e610dfdb8a529a531228", + "chksum_sha256": "97656e3d189e4905db8dc435b707b58181ef22f134ec559152e836f5d54d4bd9", "format": 1 }, { - "name": "ci", + "name": "plugins/modules/openshift_registry_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e204f2672c2daa88228e6c19b5d556c05bfab5b1ec3b07b4e97fa0139626b9cc", + "format": 1 + }, + { + "name": "plugins/modules/openshift_route.py", + "ftype": "file", + "chksum_type": "sha256", 
+ "chksum_sha256": "27b4eab0a052bcbc0619d08104f2b040ad1b773b325d5f8b90c70fa54aa9d74f", + "format": 1 + }, + { + "name": "tests", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "ci/downstream.sh", + "name": "tests/sanity", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.10.txt", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "75816fd56cb1a00c37234e84b4b2109f49cfad60901429a96e36f528202d3c2e", + "chksum_sha256": "ba2285dad70183ce56e9d43ed97d4b7a1b12e7cdd3f4e5856fb3cf380148a5ee", "format": 1 }, { - "name": "ci/downstream_fragments.py", + "name": "tests/sanity/ignore-2.11.txt", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9815e2aa84aef5d396fd07ddb371fa87362ade6fbe72d829e4d5983abe8af453", + "chksum_sha256": "dc0294c142585d8904331fd57755efc876e592ce8242e42e1159bcfddd4adbba", "format": 1 }, { - "name": "ci/doc_fragment_modules.py", + "name": "tests/sanity/ignore-2.12.txt", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e5f5e24396a2cc86faedf6d0b7a3835db06bbe6fc7b534018ed85ebecfa39e09", + "chksum_sha256": "43bc048d24e8dd8fca8052b6135daaf59207e462f72cb9848f65de9c6e09552c", "format": 1 }, { - "name": "ci/incluster_integration.sh", + "name": "tests/sanity/ignore-2.13.txt", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "012ffb20a006094a4e8a00ead7947a196c477c4eb071a1dad5ffe42f509935e1", + "chksum_sha256": "43bc048d24e8dd8fca8052b6135daaf59207e462f72cb9848f65de9c6e09552c", "format": 1 }, { - "name": "ci/Dockerfile", + "name": "tests/sanity/ignore-2.14.txt", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "372a61931849250f7805a630fd5fc8b854f457d2752e1a70912beaa4773855ec", + "chksum_sha256": "43bc048d24e8dd8fca8052b6135daaf59207e462f72cb9848f65de9c6e09552c", "format": 1 }, { - "name": "docs", + "name": "tests/sanity/ignore-2.15.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "43bc048d24e8dd8fca8052b6135daaf59207e462f72cb9848f65de9c6e09552c", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.16.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "43bc048d24e8dd8fca8052b6135daaf59207e462f72cb9848f65de9c6e09552c", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.17.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "43bc048d24e8dd8fca8052b6135daaf59207e462f72cb9848f65de9c6e09552c", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.9.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cf94385a1d8a2b51f40f88879b59922c8a8b07b8f1e0ac511a2454e257361995", + "format": 1 + }, + { + "name": "tests/sanity/requirements.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c21e5168cbc64df9b025b078fa382484d308e6a633977cc57a3dd0c5fc1c70ed", + "format": 1 + }, + { + "name": "tests/unit", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "docs/community.okd.openshift_route_module.rst", + "name": "tests/unit/plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/test_ldap_dn.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "979c41e0c3ec23b57267104feaa400b798ddbb3f3157e77a22dad0b05d067d19", + "chksum_sha256": 
"08829b431c2006a4a99bbbc4d5eb6c61612c7542dd1865893dd2f811c9a2b1a4", "format": 1 }, { - "name": "docs/community.okd.openshift_adm_prune_images_module.rst", + "name": "tests/unit/plugins/module_utils/test_ldap_sync_config.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "4b5a9d72104e8cfee2e7973f094aaf8af666a2bdec615cee482411261f5dfa57", + "chksum_sha256": "ab45e3fc6bcce7029a506464376bae9803a48cf99d1e3aa7b07caa475da561c2", "format": 1 }, { - "name": "docs/community.okd.k8s_module.rst", + "name": "tests/unit/plugins/module_utils/test_openshift_docker_image.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "5d8bf2916753820405f62f00174687e62756dd2b610faea666f1ba077a02d573", + "chksum_sha256": "3f058b71343cf91986e8a106444d6bc5f033d8bce6d1e6c9bb04d04d5deeb2db", "format": 1 }, { - "name": "docs/community.okd.openshift_inventory.rst", + "name": "tests/unit/plugins/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_openshift_adm_migrate_template_instances.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "551e42026b2affd901c51f2be85fb69d9911e037edf424667eacc1b21aa00583", + "chksum_sha256": "4c426c87d28313524cbd378f917a2d70242f333571f942d44bc9bf8c7e591586", "format": 1 }, { - "name": "docs/ansible_turbo_mode.rst", + "name": "tests/unit/requirements.txt", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a9bcd000c20de1d1ad35f5516f4fdffce07c8e28a5ebee572629a3b9cb867152", + "chksum_sha256": "8004a972c3d5c274d8c808e8d8afe03b9aca8af8eebf3df4298f114d8008b754", "format": 1 }, { - "name": "docs/community.okd.openshift_process_module.rst", + "name": "tests/config.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3910d82ee4a8a31d84b6cbac6b7e6d6331d1ef9fe2b4331ce82e3065090baf66", + "chksum_sha256": "4129945061440c8d8c281590b8054b194f5958833c722aa40e878b09038c380e", "format": 1 }, { - "name": "docs/community.okd.openshift_adm_groups_sync_module.rst", + "name": ".gitignore", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f8c405bb2c291cbdb3a0f4398bb9b205864dbb941cb0b0024e7cfa74d6ae49bd", + "chksum_sha256": "8344abb7aafbdb654f0c88532af7ab557dad7a6a7cba60f4834f33c2a1f8524f", "format": 1 }, { - "name": "docs/community.okd.openshift_auth_module.rst", + "name": ".yamllint", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9c361ea64a0005fb0682c8fbd863cb7ef7cd0eb45f52383cc1123690e91fed11", + "chksum_sha256": "20f14c567d8ba0813a1ae58e298093a8004e4657baed321e4567de0f676beeaf", "format": 1 }, { - "name": "docs/community.okd.oc_connection.rst", + "name": "CHANGELOG.rst", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c0ffee1276ab6cfa6677b42fed2fcd8b46622ed5cb4c8ee0f9c9cac4db870f75", + "chksum_sha256": "16845876aa6654eef1928c3cfddbe2a8846125d6165af1297c12564202901604", "format": 1 }, { - "name": "docs/community.okd.openshift_adm_prune_auth_module.rst", + "name": "CONTRIBUTING.md", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "fbad4ddccea64f4ed415cf86d54b7d6fc3e6a7be46c19d11fb20202b527275dd", + "chksum_sha256": "4156016c23606d288b98c4fa0eafc6afe1cf28e695fb5a267bcc3d337a7bfef0", "format": 1 }, { - "name": "docs/community.okd.openshift_adm_prune_builds_module.rst", + "name": "LICENSE", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d4762b6900b787b6efc27fbeede84044839e083fa5bd06165eb5c1b17f203a31", + "chksum_sha256": "8b1ba204bb69a0ade2bfcf65ef294a920f6bb361b317dba43c7ef29d96332b9b", 
"format": 1 }, { - "name": "docs/community.okd.openshift_import_image_module.rst", + "name": "Makefile", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f031bc1985bda8784a3cb684767db28a469216ba7e47940bf7c49844acc87b08", + "chksum_sha256": "624803bcb735d2b0c3ea45893d4d4640940044a0442815f3bd4047d1a09afd3a", "format": 1 }, { - "name": "docs/community.okd.openshift_build_module.rst", + "name": "OWNERS", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "50c1219f5524d60a4b46bc58c6df30d8af7b6e6ecc20fd4d15cf3e0fc352c05e", + "chksum_sha256": "771ebefb6c2b0154bf043cc537cc55b8421ddd4058248b00a62cdc9b190131da", "format": 1 }, { - "name": "docs/community.okd.openshift_registry_info_module.rst", + "name": "OWNERS_ALIASES", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c66f8f9adb98eedcfda0e31034f5d14e881e9a41d3793fb36a3f311635166e69", + "chksum_sha256": "9ca328507f3e791383a7ab414eb73ab6d6becc4c637f2fdaace691e8be494774", "format": 1 }, { - "name": "docs/community.okd.openshift_adm_prune_deployments_module.rst", + "name": "README.md", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "02693279a5b1c52eaf4fd70d8f01d87750f9b0554cd1d550892192b7070f558a", + "chksum_sha256": "957df67ccf2961757af99aa031a59310cbbe7addb7852c84717bbd6cab8dffa7", "format": 1 }, { - "name": "docs/community.okd.openshift_adm_migrate_template_instances_module.rst", + "name": "codecov.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "99634e05dde50a01bd4cba0c696e3622864b744b6ea2f6103094c3809238ffd9", + "chksum_sha256": "caa848a2e02be5014890c5cbc7727e9a00d40394637c90886eb813d60f82c9c3", "format": 1 }, { - "name": "CHANGELOG.rst", + "name": "requirements.txt", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b22ef082f56193da0b6bd2984c3eff9beb4e5980017a7fcb4bcdad7898ff5112", + "chksum_sha256": "d20fa49248fb968b70381346d2b2f6195e19dc1ddded06856bc4aefa6a25d431", "format": 1 }, { "name": "requirements.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a020e70b5ab59096b404e97d4f4a04a3d0b63627dbbcede89b3e131050fcccb4", + "chksum_sha256": "c3d472e03d26b6fdeb1a9a3e986fb9df54ae2435406507e83050acc20433aedc", + "format": 1 + }, + { + "name": "test-requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "518d4932bf69764d9e3b577eb15452a40665b270dddcd5a9d58fec8ac6b26239", + "format": 1 + }, + { + "name": "tox.ini", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a61868a5974ff417ea848cd6ab079ff0441f30dc652d0c470ddc742658213f84", "format": 1 } ], diff --git a/ansible_collections/community/okd/MANIFEST.json b/ansible_collections/community/okd/MANIFEST.json index dd2937e35..f12873236 100644 --- a/ansible_collections/community/okd/MANIFEST.json +++ b/ansible_collections/community/okd/MANIFEST.json @@ -2,7 +2,7 @@ "collection_info": { "namespace": "community", "name": "okd", - "version": "2.3.0", + "version": "3.0.1", "authors": [ "geerlingguy (https://www.jeffgeerling.com/)", "fabianvf (https://github.com/fabianvf)", @@ -23,7 +23,7 @@ "license": [], "license_file": "LICENSE", "dependencies": { - "kubernetes.core": ">=2.4.0" + "kubernetes.core": ">=3.0.0" }, "repository": "https://github.com/openshift/community.okd", "documentation": "", @@ -34,7 +34,7 @@ "name": "FILES.json", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6e3f825a7ea74f2f591751d9ee4aac800f99ca344e86a5d00477306b14419ae8", + "chksum_sha256": "2e0de6ad088440b5ec4eb9313aa6dfacb5ae6be46f491cfc1302a8b321178167", "format": 1 }, "format": 1 
diff --git a/ansible_collections/community/okd/Makefile b/ansible_collections/community/okd/Makefile index 7990f8447..b897aedb4 100644 --- a/ansible_collections/community/okd/Makefile +++ b/ansible_collections/community/okd/Makefile @@ -1,7 +1,7 @@ .PHONY: molecule # Also needs to be updated in galaxy.yml -VERSION = 2.3.0 +VERSION = 3.0.1 SANITY_TEST_ARGS ?= --docker --color UNITS_TEST_ARGS ?= --docker --color @@ -16,7 +16,7 @@ build: clean ansible-galaxy collection build install: build - ansible-galaxy collection install -p ansible_collections community-okd-$(VERSION).tar.gz + ansible-galaxy collection install --force -p ansible_collections community-okd-$(VERSION).tar.gz sanity: install cd ansible_collections/community/okd && ansible-test sanity -v --python $(PYTHON_VERSION) $(SANITY_TEST_ARGS) diff --git a/ansible_collections/community/okd/OWNERS_ALIASES b/ansible_collections/community/okd/OWNERS_ALIASES index f65b7d7aa..16c8602e3 100644 --- a/ansible_collections/community/okd/OWNERS_ALIASES +++ b/ansible_collections/community/okd/OWNERS_ALIASES @@ -1,19 +1,11 @@ aliases: community.okd-approvers: - gravesm - - akasurde - - fabianvf - - tima - - goneri - jillr - alinabuzachis - abikouo community.okd-reviewers: - gravesm - - akasurde - - fabianvf - - tima - - goneri - jillr - alinabuzachis - abikouo diff --git a/ansible_collections/community/okd/README.md b/ansible_collections/community/okd/README.md index f3e1bba67..4f6ba618e 100644 --- a/ansible_collections/community/okd/README.md +++ b/ansible_collections/community/okd/README.md @@ -10,10 +10,9 @@ The collection includes a variety of Ansible content to help automate the manage ## Ansible version compatibility -This collection has been tested against following Ansible versions: **>=2.9.17**. +This collection has been tested against following Ansible versions: **>=2.14.0**. -For collections that support Ansible 2.9, please ensure you update your `network_os` to use the -fully qualified collection name (for example, `cisco.ios.ios`). +Please ensure to update the `network_os` to use the fully qualified collection name (for example, `cisco.ios.ios`). Plugins and modules within a collection may be tested with only specific Ansible versions. A collection may contain metadata that identifies these versions. PEP440 is the schema used to describe the versions of Ansible. @@ -21,11 +20,11 @@ PEP440 is the schema used to describe the versions of Ansible. ## Python Support -* Collection supports 3.6+ +* Collection supports 3.9+ ## Kubernetes Version Support -This collection supports Kubernetes versions >=1.19. +This collection supports Kubernetes versions >=1.24. ## Included content @@ -77,7 +76,7 @@ You can also include it in a `requirements.yml` file and install it via `ansible --- collections: - name: community.okd - version: 2.3.0 + version: 3.0.1 ``` ### Installing the Kubernetes Python Library @@ -161,17 +160,17 @@ where the `IMAGE_FORMAT` environment variable is the full reference to your cont Releases are automatically built and pushed to Ansible Galaxy for any new tag. Before tagging a release, make sure to do the following: 1. Update the version in the following places: - a. The `version` in `galaxy.yml` - b. This README's `requirements.yml` example - c. The `DOWNSTREAM_VERSION` in `ci/downstream.sh` - d. The `VERSION` in `Makefile` - e. The version in `requirements.yml` - 1. Update the CHANGELOG: - 1. Make sure you have [`antsibull-changelog`](https://pypi.org/project/antsibull-changelog/) installed. - 1. 
Make sure there are fragments for all known changes in `changelogs/fragments`. - 1. Run `antsibull-changelog release`. - 1. Commit the changes and create a PR with the changes. Wait for tests to pass, then merge it once they have. - 1. Tag the version in Git and push to GitHub. + * a. The `version` in `galaxy.yml` + * b. This README's `requirements.yml` example + * c. The `DOWNSTREAM_VERSION` in `ci/downstream.sh` + * d. The `VERSION` in `Makefile` + * e. The version in `requirements.yml` + 2. Update the CHANGELOG: + * 1. Make sure you have [`antsibull-changelog`](https://pypi.org/project/antsibull-changelog/) installed. + * 2. Make sure there are fragments for all known changes in `changelogs/fragments`. + * 3. Run `antsibull-changelog release`. + 3. Commit the changes and create a PR with the changes. Wait for tests to pass, then merge it once they have. + 4. Tag the version in Git and push to GitHub. After the version is published, verify it exists on the [OKD Collection Galaxy page](https://galaxy.ansible.com/community/okd). diff --git a/ansible_collections/community/okd/changelogs/.plugin-cache.yaml b/ansible_collections/community/okd/changelogs/.plugin-cache.yaml deleted file mode 100644 index 223920534..000000000 --- a/ansible_collections/community/okd/changelogs/.plugin-cache.yaml +++ /dev/null @@ -1,92 +0,0 @@ -objects: {} -plugins: - become: {} - cache: {} - callback: {} - cliconf: {} - connection: - oc: - description: Execute tasks in pods running on OpenShift. - name: oc - version_added: null - httpapi: {} - inventory: - openshift: - description: OpenShift inventory source - name: openshift - version_added: null - lookup: {} - module: - k8s: - description: Manage OpenShift objects - name: k8s - namespace: '' - version_added: null - openshift_adm_groups_sync: - description: Sync OpenShift Groups with records from an external provider. - name: openshift_adm_groups_sync - namespace: '' - version_added: 2.1.0 - openshift_adm_migrate_template_instances: - description: Update TemplateInstances to point to the latest group-version-kinds - name: openshift_adm_migrate_template_instances - namespace: '' - version_added: 2.2.0 - openshift_adm_prune_auth: - description: Removes references to the specified roles, clusterroles, users, - and groups - name: openshift_adm_prune_auth - namespace: '' - version_added: 2.2.0 - openshift_adm_prune_builds: - description: Prune old completed and failed builds - name: openshift_adm_prune_builds - namespace: '' - version_added: 2.3.0 - openshift_adm_prune_deployments: - description: Remove old completed and failed deployment configs - name: openshift_adm_prune_deployments - namespace: '' - version_added: 2.2.0 - openshift_adm_prune_images: - description: Remove unreferenced images - name: openshift_adm_prune_images - namespace: '' - version_added: 2.2.0 - openshift_auth: - description: Authenticate to OpenShift clusters which require an explicit login - step - name: openshift_auth - namespace: '' - version_added: 0.2.0 - openshift_build: - description: Start a new build or Cancel running, pending, or new builds. - name: openshift_build - namespace: '' - version_added: 2.3.0 - openshift_import_image: - description: Import the latest image information from a tag in a container image - registry. 
- name: openshift_import_image - namespace: '' - version_added: 2.2.0 - openshift_process: - description: Process an OpenShift template.openshift.io/v1 Template - name: openshift_process - namespace: '' - version_added: 0.3.0 - openshift_registry_info: - description: Display information about the integrated registry. - name: openshift_registry_info - namespace: '' - version_added: 2.2.0 - openshift_route: - description: Expose a Service as an OpenShift Route. - name: openshift_route - namespace: '' - version_added: 0.3.0 - netconf: {} - shell: {} - strategy: {} - vars: {} -version: 2.3.0 diff --git a/ansible_collections/community/okd/changelogs/changelog.yaml b/ansible_collections/community/okd/changelogs/changelog.yaml index 12184d712..82cd00add 100644 --- a/ansible_collections/community/okd/changelogs/changelog.yaml +++ b/ansible_collections/community/okd/changelogs/changelog.yaml @@ -199,3 +199,28 @@ releases: name: openshift_build namespace: '' release_date: '2023-02-03' + 3.0.0: + changes: + breaking_changes: + - Bump minimum Python suupported version to 3.9 (https://github.com/openshift/community.okd/pull/202). + - Remove support for ansible-core < 2.14 (https://github.com/openshift/community.okd/pull/202). + deprecated_features: + - openshift - the ``openshift`` inventory plugin has been deprecated and will + be removed in release 4.0.0 (https://github.com/ansible-collections/kubernetes.core/issues/31). + release_summary: This major release drops support for ansible-core versions + lower than 2.14 and Python versions lower than 3.9. It also deprecates ``openshift`` + inventory plugin. + fragments: + - 20230206-deprecate-openshift-inventory.yml + - 20231107-move-sanity-and-units-to-gha.yml + release_date: '2023-11-20' + 3.0.1: + changes: + release_summary: 'This patch release fixes an issue in building the downstream + collection. 
+ + ' + fragments: + - downstream-gitignore.yml + - release_summary.yml + release_date: '2023-11-30' diff --git a/ansible_collections/community/okd/changelogs/config.yaml b/ansible_collections/community/okd/changelogs/config.yaml index 1a31c10ed..4e6bb5e98 100644 --- a/ansible_collections/community/okd/changelogs/config.yaml +++ b/ansible_collections/community/okd/changelogs/config.yaml @@ -10,21 +10,21 @@ notesdir: fragments prelude_section_name: release_summary prelude_section_title: Release Summary sections: -- - major_changes - - Major Changes -- - minor_changes - - Minor Changes -- - breaking_changes - - Breaking Changes / Porting Guide -- - deprecated_features - - Deprecated Features -- - removed_features - - Removed Features (previously deprecated) -- - security_fixes - - Security Fixes -- - bugfixes - - Bugfixes -- - known_issues - - Known Issues + - - major_changes + - Major Changes + - - minor_changes + - Minor Changes + - - breaking_changes + - Breaking Changes / Porting Guide + - - deprecated_features + - Deprecated Features + - - removed_features + - Removed Features (previously deprecated) + - - security_fixes + - Security Fixes + - - bugfixes + - Bugfixes + - - known_issues + - Known Issues title: OKD Collection trivial_section_name: trivial diff --git a/ansible_collections/community/okd/ci/Dockerfile b/ansible_collections/community/okd/ci/Dockerfile index 1a509190d..d57e7a007 100644 --- a/ansible_collections/community/okd/ci/Dockerfile +++ b/ansible_collections/community/okd/ci/Dockerfile @@ -1,4 +1,4 @@ -FROM registry.access.redhat.com/ubi8/ubi +FROM registry.access.redhat.com/ubi9/ubi ENV OPERATOR=/usr/local/bin/ansible-operator \ USER_UID=1001 \ @@ -11,20 +11,20 @@ RUN yum install -y \ glibc-langpack-en \ git \ make \ - python39 \ - python39-devel \ - python39-pip \ - python39-setuptools \ + python3 \ + python3-devel \ + python3-pip \ + python3-setuptools \ gcc \ openldap-devel \ - && pip3 install --no-cache-dir --upgrade setuptools pip \ - && pip3 install --no-cache-dir \ + && python3.9 -m pip install --no-cache-dir --upgrade setuptools pip \ + && python3.9 -m pip install --no-cache-dir \ kubernetes \ - ansible==2.9.* \ - "molecule<3.3.0" \ + "ansible-core" \ + "molecule" \ && yum clean all \ && rm -rf $HOME/.cache \ - && curl -L https://github.com/openshift/okd/releases/download/4.5.0-0.okd-2020-08-12-020541/openshift-client-linux-4.5.0-0.okd-2020-08-12-020541.tar.gz | tar -xz -C /usr/local/bin + && curl -L https://github.com/openshift/okd/releases/download/4.12.0-0.okd-2023-04-16-041331/openshift-client-linux-4.12.0-0.okd-2023-04-16-041331.tar.gz | tar -xz -C /usr/local/bin # TODO: Is there a better way to install this client in ubi8? COPY . 
/opt/ansible diff --git a/ansible_collections/community/okd/ci/downstream.sh b/ansible_collections/community/okd/ci/downstream.sh index 001959c7e..c75398b77 100755 --- a/ansible_collections/community/okd/ci/downstream.sh +++ b/ansible_collections/community/okd/ci/downstream.sh @@ -9,7 +9,7 @@ # - All functions are prefixed with f_ so it's obvious where they come # from when in use throughout the script -DOWNSTREAM_VERSION="2.3.0" +DOWNSTREAM_VERSION="3.0.1" KEEP_DOWNSTREAM_TMPDIR="${KEEP_DOWNSTREAM_TMPDIR:-''}" INSTALL_DOWNSTREAM_COLLECTION_PATH="${INSTALL_DOWNSTREAM_COLLECTION_PATH:-}" _build_dir="" @@ -47,7 +47,7 @@ f_text_sub() sed -i.bak "s/Kubernetes/OpenShift/g" "${_build_dir}/galaxy.yml" sed -i.bak "s/^version\:.*$/version: ${DOWNSTREAM_VERSION}/" "${_build_dir}/galaxy.yml" sed -i.bak "/STARTREMOVE/,/ENDREMOVE/d" "${_build_dir}/README.md" - sed -i.bak "s/[[:space:]]okd:$/ openshift:/" ${_build_dir}/meta/runtime.yml + sed -i.bak "s/[[:space:]]okd:$/ openshift:/" "${_build_dir}/meta/runtime.yml" find "${_build_dir}" -type f ! -name galaxy.yml -exec sed -i.bak "s/community\.okd/redhat\.openshift/g" {} \; find "${_build_dir}" -type f -name "*.bak" -delete @@ -62,12 +62,12 @@ f_prep() # Files to copy downstream (relative repo root dir path) _file_manifest=( + .gitignore CHANGELOG.rst galaxy.yml LICENSE README.md Makefile - setup.cfg .yamllint requirements.txt requirements.yml @@ -76,6 +76,7 @@ f_prep() # Directories to recursively copy downstream (relative repo root dir path) _dir_manifest=( + .config changelogs ci meta @@ -145,7 +146,7 @@ f_handle_doc_fragments_workaround() local rendered_fragments="./rendereddocfragments.txt" # FIXME: Check Python interpreter from environment variable to work with prow - PYTHON=${DOWNSTREAM_BUILD_PYTHON:-/usr/bin/python3.6} + PYTHON=${DOWNSTREAM_BUILD_PYTHON:-/usr/bin/python3} f_log_info "Using Python interpreter: ${PYTHON}" # Modules with inherited doc fragments from kubernetes.core that need @@ -156,7 +157,7 @@ f_handle_doc_fragments_workaround() # Build the collection, export docs, render them, stitch it all back together pushd "${_build_dir}" || return ansible-galaxy collection build - ansible-galaxy collection install -p "${install_collections_dir}" ./*.tar.gz + ansible-galaxy collection install --force-with-deps -p "${install_collections_dir}" ./*.tar.gz rm ./*.tar.gz for doc_fragment_mod in "${_doc_fragment_modules[@]}" do diff --git a/ansible_collections/community/okd/docs/community.okd.k8s_module.rst b/ansible_collections/community/okd/docs/community.okd.k8s_module.rst index 8d0e0f9dc..9a14c4417 100644 --- a/ansible_collections/community/okd/docs/community.okd.k8s_module.rst +++ b/ansible_collections/community/okd/docs/community.okd.k8s_module.rst @@ -351,6 +351,41 @@ Parameters
Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable.
+ + +
+ impersonate_groups + +
+ list + / elements=string +
+
added in 2.3.0
+ + + + +
Group(s) to impersonate for the operation.
+
Can also be specified via the K8S_AUTH_IMPERSONATE_GROUPS environment variable. Example: Group1,Group2
+ + + + +
+ impersonate_user + +
+ string +
+
added in 2.3.0
+ + + + +
Username to impersonate for the operation.
+
Can also be specified via the K8S_AUTH_IMPERSONATE_USER environment variable.
+ +
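For instance, the two impersonation options map directly onto task parameters. A minimal sketch (the user, group, and namespace names here are invented for illustration):

.. code-block:: yaml

    - name: Create a ConfigMap while impersonating another identity
      community.okd.k8s:
        state: present
        namespace: testing                  # hypothetical namespace
        impersonate_user: jane@example.org  # hypothetical user
        impersonate_groups:
          - system:masters                  # hypothetical group
        definition:
          apiVersion: v1
          kind: ConfigMap
          metadata:
            name: impersonation-demo
          data:
            key: value

Note that the authenticated account must itself be permitted (via RBAC) to impersonate the requested user and groups.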
@@ -382,6 +417,7 @@ Parameters
Path to an existing Kubernetes config file. If not provided, and no other connection options are provided, the Kubernetes client will attempt to load the default configuration file from ~/.kube/config. Can also be specified via K8S_AUTH_KUBECONFIG environment variable.
+
Multiple Kubernetes config files can be provided using the separator ';' on Windows platforms or ':' on other platforms.
The kubernetes configuration can be provided as dictionary. This feature requires a python kubernetes client version >= 17.17.0. Added in version 2.2.0.
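For example, assuming two hypothetical config files on a Linux controller, the paths are joined with ':' and merged the same way the KUBECONFIG environment variable handles multiple entries:

.. code-block:: yaml

    - name: Apply a manifest using settings merged from two kubeconfig files
      community.okd.k8s:
        kubeconfig: "~/.kube/config:/tmp/alternate-config"  # use ';' on Windows
        state: present
        src: /tmp/manifest.yml                              # placeholder path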
@@ -448,6 +484,25 @@ Parameters
If resource definition is provided, the metadata.namespace value from the resource_definition will override this option.
+ + +
+ no_proxy + +
+ string +
+
added in 2.3.0
+ + + + +
The comma separated list of hosts/domains/IP/CIDR that shouldn't go through proxy. Can also be specified via K8S_AUTH_NO_PROXY environment variable.
+
Please note that this module does not pick up typical proxy settings from the environment (e.g. NO_PROXY).
+
This feature requires kubernetes>=19.15.0. With an older kubernetes library the request fails even if no_proxy is set correctly.
+
Example value: "localhost,.local,.example.com,127.0.0.1,127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"
+ +
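no_proxy is only meaningful together with a configured proxy. An illustrative sketch (the proxy URL, host list, and path below are placeholders):

.. code-block:: yaml

    - name: Apply a manifest through a proxy, bypassing it for internal addresses
      community.okd.k8s:
        proxy: http://proxy.example.org:3128             # hypothetical proxy
        no_proxy: "localhost,.cluster.local,10.0.0.0/8"
        state: present
        src: /tmp/manifest.yml                           # placeholder path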
@@ -603,6 +658,7 @@ Parameters
Provide a path to a file containing a valid YAML definition of an object or objects to be created or updated. Mutually exclusive with resource_definition. NOTE: kind, api_version, name, and namespace will be overwritten by corresponding values found in the configuration read in from the src file.
Reads from the local file system. To read from the Ansible controller's file system, including vaulted files, use the file lookup plugin or template lookup plugin, combined with the from_yaml filter, and pass the result to resource_definition. See Examples below.
+
The URL to manifest files that can be used to create the resource. Added in version 2.4.0.
Mutually exclusive with template in case of kubernetes.core.k8s module.
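A minimal sketch of both forms (the path and URL are placeholders):

.. code-block:: yaml

    - name: Create objects from a file on the remote node
      community.okd.k8s:
        state: present
        src: /tmp/crd-resource.yml

    - name: Create objects from a remote manifest (src accepts a URL since 2.4.0)
      community.okd.k8s:
        state: present
        src: https://example.org/manifests/crd-resource.yml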
@@ -938,10 +994,10 @@ Examples app: galaxy service: web ports: - - protocol: TCP - targetPort: 8000 - name: port-8000-tcp - port: 8000 + - protocol: TCP + targetPort: 8000 + name: port-8000-tcp + port: 8000 - name: Remove an existing Service object community.okd.k8s: @@ -975,15 +1031,15 @@ Examples state: present definition: "{{ lookup('template', '/testing/deployment.yml') | from_yaml }}" validate: - fail_on_error: yes + fail_on_error: true - name: warn on validation errors, check for unexpected properties community.okd.k8s: state: present definition: "{{ lookup('template', '/testing/deployment.yml') | from_yaml }}" validate: - fail_on_error: no - strict: yes + fail_on_error: false + strict: true @@ -1060,7 +1116,7 @@ Common return values are documented `here error -
error while trying to create/delete the object.
+
Error while trying to create/delete the object.

diff --git a/ansible_collections/community/okd/docs/community.okd.oc_connection.rst b/ansible_collections/community/okd/docs/community.okd.oc_connection.rst index 14f2477cc..12869df2a 100644 --- a/ansible_collections/community/okd/docs/community.okd.oc_connection.rst +++ b/ansible_collections/community/okd/docs/community.okd.oc_connection.rst @@ -308,7 +308,7 @@ Status Authors ~~~~~~~ -- xuxinkun +- xuxinkun (@xuxinkun) .. hint:: diff --git a/ansible_collections/community/okd/docs/community.okd.openshift_adm_groups_sync_module.rst b/ansible_collections/community/okd/docs/community.okd.openshift_adm_groups_sync_module.rst index e16aa4d54..7d319a472 100644 --- a/ansible_collections/community/okd/docs/community.okd.openshift_adm_groups_sync_module.rst +++ b/ansible_collections/community/okd/docs/community.okd.openshift_adm_groups_sync_module.rst @@ -55,6 +55,7 @@ Parameters + Default:
[]
Allowed groups; each entry can be an OpenShift group name or an LDAP group DN value.
@@ -150,6 +151,7 @@ Parameters + Default:
[]
Denied groups; each entry can be an OpenShift group name or an LDAP group DN value.
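For example (a sketch; the variable holding the sync configuration and the group DN are invented), denying one group while syncing the rest looks like:

.. code-block:: yaml

    - name: Sync all LDAP groups except a denied one
      community.okd.openshift_adm_groups_sync:
        config: "{{ ldap_sync_config }}"  # hypothetical LDAPSyncConfig definition
        deny_groups:
          - "cn=contractors,ou=groups,dc=example,dc=org"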
@@ -172,6 +174,41 @@ Parameters
Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable.
+ + +
+ impersonate_groups + +
+ list + / elements=string +
+
added in 2.3.0
+ + + + +
Group(s) to impersonate for the operation.
+
Can also be specified via the K8S_AUTH_IMPERSONATE_GROUPS environment variable. Example: Group1,Group2
+ + + + +
+ impersonate_user + +
+ string +
+
added in 2.3.0
+ + + + +
Username to impersonate for the operation.
+
Can also be specified via the K8S_AUTH_IMPERSONATE_USER environment variable.
+ +
@@ -185,9 +222,29 @@ Parameters
Path to an existing Kubernetes config file. If not provided, and no other connection options are provided, the Kubernetes client will attempt to load the default configuration file from ~/.kube/config. Can also be specified via K8S_AUTH_KUBECONFIG environment variable.
+
Multiple Kubernetes config files can be provided using the separator ';' on Windows platforms or ':' on other platforms.
The kubernetes configuration can be provided as dictionary. This feature requires a python kubernetes client version >= 17.17.0. Added in version 2.2.0.
+ + +
+ no_proxy + +
+ string +
+
added in 2.3.0
+ + + + +
The comma separated list of hosts/domains/IP/CIDR that shouldn't go through proxy. Can also be specified via K8S_AUTH_NO_PROXY environment variable.
+
Please note that this module does not pick up typical proxy settings from the environment (e.g. NO_PROXY).
+
This feature requires kubernetes>=19.15.0. With an older kubernetes library the request fails even if no_proxy is set correctly.
+
Example value: "localhost,.local,.example.com,127.0.0.1,127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"
+ +
@@ -439,31 +496,31 @@ Examples - name: Sync all groups from an LDAP server openshift_adm_groups_sync: src: - kind: LDAPSyncConfig - apiVersion: v1 - url: ldap://localhost:1390 - insecure: true - bindDN: cn=admin,dc=example,dc=org - bindPassword: adminpassword - rfc2307: - groupsQuery: - baseDN: "cn=admins,ou=groups,dc=example,dc=org" - scope: sub - derefAliases: never - filter: (objectClass=*) - pageSize: 0 - groupUIDAttribute: dn - groupNameAttributes: [ cn ] - groupMembershipAttributes: [ member ] - usersQuery: - baseDN: "ou=users,dc=example,dc=org" - scope: sub - derefAliases: never - pageSize: 0 - userUIDAttribute: dn - userNameAttributes: [ mail ] - tolerateMemberNotFoundErrors: true - tolerateMemberOutOfScopeErrors: true + kind: LDAPSyncConfig + apiVersion: v1 + url: ldap://localhost:1390 + insecure: true + bindDN: cn=admin,dc=example,dc=org + bindPassword: adminpassword + rfc2307: + groupsQuery: + baseDN: "cn=admins,ou=groups,dc=example,dc=org" + scope: sub + derefAliases: never + filter: (objectClass=*) + pageSize: 0 + groupUIDAttribute: dn + groupNameAttributes: [cn] + groupMembershipAttributes: [member] + usersQuery: + baseDN: "ou=users,dc=example,dc=org" + scope: sub + derefAliases: never + pageSize: 0 + userUIDAttribute: dn + userNameAttributes: [mail] + tolerateMemberNotFoundErrors: true + tolerateMemberOutOfScopeErrors: true # Sync all groups except the ones from the deny_groups from an LDAP server - name: Sync all groups from an LDAP server using deny_groups diff --git a/ansible_collections/community/okd/docs/community.okd.openshift_adm_migrate_template_instances_module.rst b/ansible_collections/community/okd/docs/community.okd.openshift_adm_migrate_template_instances_module.rst index a7940ca85..c1d54dcfb 100644 --- a/ansible_collections/community/okd/docs/community.okd.openshift_adm_migrate_template_instances_module.rst +++ b/ansible_collections/community/okd/docs/community.okd.openshift_adm_migrate_template_instances_module.rst @@ -134,6 +134,41 @@ Parameters
Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable.
+ + +
+ impersonate_groups + +
+ list + / elements=string +
+
added in 2.3.0
+ + + + +
Group(s) to impersonate for the operation.
+
Can also be specified via the K8S_AUTH_IMPERSONATE_GROUPS environment variable. Example: Group1,Group2
+ + + + +
+ impersonate_user + +
+ string +
+
added in 2.3.0
+ + + + +
Username to impersonate for the operation.
+
Can also be specified via the K8S_AUTH_IMPERSONATE_USER environment variable.
+ +
@@ -147,6 +182,7 @@ Parameters
Path to an existing Kubernetes config file. If not provided, and no other connection options are provided, the Kubernetes client will attempt to load the default configuration file from ~/.kube/config. Can also be specified via K8S_AUTH_KUBECONFIG environment variable.
+
Multiple Kubernetes config files can be provided using the separator ';' on Windows platforms or ':' on other platforms.
The kubernetes configuration can be provided as dictionary. This feature requires a python kubernetes client version >= 17.17.0. Added in version 2.2.0.
@@ -166,6 +202,25 @@ Parameters
If no namespace is specified, migrate objects in all namespaces.
+ + +
+ no_proxy + +
+ string +
+
added in 2.3.0
+ + + + +
The comma separated list of hosts/domains/IP/CIDR that shouldn't go through proxy. Can also be specified via K8S_AUTH_NO_PROXY environment variable.
+
Please note that this module does not pick up typical proxy settings from the environment (e.g. NO_PROXY).
+
This feature requires kubernetes>=19.15.0. With an older kubernetes library the request fails even if no_proxy is set correctly.
+
Example value: "localhost,.local,.example.com,127.0.0.1,127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"
+ +
@@ -478,13 +533,13 @@ Examples .. code-block:: yaml - name: Migrate TemplateInstances in namespace=test - community.okd.openshift_adm_migrate_template_instances: - namespace: test - register: _result + community.okd.openshift_adm_migrate_template_instances: + namespace: test + register: _result - - name: Migrate TemplateInstances in all namespaces - community.okd.openshift_adm_migrate_template_instances: - register: _result + - name: Migrate TemplateInstances in all namespaces + community.okd.openshift_adm_migrate_template_instances: + register: _result diff --git a/ansible_collections/community/okd/docs/community.okd.openshift_adm_prune_auth_module.rst b/ansible_collections/community/okd/docs/community.okd.openshift_adm_prune_auth_module.rst index b4b11c2be..4e8e1dd95 100644 --- a/ansible_collections/community/okd/docs/community.okd.openshift_adm_prune_auth_module.rst +++ b/ansible_collections/community/okd/docs/community.okd.openshift_adm_prune_auth_module.rst @@ -134,6 +134,41 @@ Parameters
Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable.
+ + +
+ impersonate_groups + +
+ list + / elements=string +
+
added in 2.3.0
+ + + + +
Group(s) to impersonate for the operation.
+
Can also be specified via the K8S_AUTH_IMPERSONATE_GROUPS environment variable. Example: Group1,Group2
+ + + + +
+ impersonate_user + +
+ string +
+
added in 2.3.0
+ + + + +
Username to impersonate for the operation.
+
Can also be specified via the K8S_AUTH_IMPERSONATE_USER environment variable.
+ +
@@ -147,6 +182,7 @@ Parameters
Path to an existing Kubernetes config file. If not provided, and no other connection options are provided, the Kubernetes client will attempt to load the default configuration file from ~/.kube/config. Can also be specified via K8S_AUTH_KUBECONFIG environment variable.
+
Multiple Kubernetes config files can be provided using the separator ';' on Windows platforms or ':' on other platforms.
The kubernetes configuration can be provided as dictionary. This feature requires a python kubernetes client version >= 17.17.0. Added in version 2.2.0.
@@ -200,6 +236,25 @@ Parameters
Ignored when resource is set to clusterroles.
+ + +
+ no_proxy + +
+ string +
+
added in 2.3.0
+ + + + +
The comma separated list of hosts/domains/IP/CIDR that shouldn't go through proxy. Can also be specified via K8S_AUTH_NO_PROXY environment variable.
+
Please note that this module does not pick up typical proxy settings from the environment (e.g. NO_PROXY).
+
This feature requires kubernetes>=19.15.0. With an older kubernetes library the request fails even if no_proxy is set correctly.
+
Example value: "localhost,.local,.example.com,127.0.0.1,127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"
+ +
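A hedged sketch of typical usage (the role name and namespace are invented):

.. code-block:: yaml

    - name: Remove references to the 'reader' role in the 'testing' namespace
      community.okd.openshift_adm_prune_auth:
        resource: roles
        namespace: testing
        name: reader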
diff --git a/ansible_collections/community/okd/docs/community.okd.openshift_adm_prune_builds_module.rst b/ansible_collections/community/okd/docs/community.okd.openshift_adm_prune_builds_module.rst index 6fb9443b7..ccffb8d2b 100644 --- a/ansible_collections/community/okd/docs/community.okd.openshift_adm_prune_builds_module.rst +++ b/ansible_collections/community/okd/docs/community.okd.openshift_adm_prune_builds_module.rst @@ -435,7 +435,7 @@ Examples # all builds whose associated BuildConfig no longer exists - name: Run delete orphan Builds community.okd.openshift_adm_prune_builds: - orphans: True + orphans: true # Run deleting older completed and failed builds keep younger than 2hours - name: Run delete builds, keep younger than 2h diff --git a/ansible_collections/community/okd/docs/community.okd.openshift_adm_prune_deployments_module.rst b/ansible_collections/community/okd/docs/community.okd.openshift_adm_prune_deployments_module.rst index 16e0deda9..0c73845b6 100644 --- a/ansible_collections/community/okd/docs/community.okd.openshift_adm_prune_deployments_module.rst +++ b/ansible_collections/community/okd/docs/community.okd.openshift_adm_prune_deployments_module.rst @@ -134,6 +134,41 @@ Parameters
Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable.
+ + +
+ impersonate_groups + +
+ list + / elements=string +
+
added in 2.3.0
+ + + + +
Group(s) to impersonate for the operation.
+
Can also be specified via the K8S_AUTH_IMPERSONATE_GROUPS environment variable. Example: Group1,Group2
+ + + + +
+ impersonate_user + +
+ string +
+
added in 2.3.0
+ + + + +
Username to impersonate for the operation.
+
Can also be specified via the K8S_AUTH_IMPERSONATE_USER environment variable.
+ +
@@ -162,6 +197,7 @@ Parameters
Path to an existing Kubernetes config file. If not provided, and no other connection options are provided, the Kubernetes client will attempt to load the default configuration file from ~/.kube/config. Can also be specified via K8S_AUTH_KUBECONFIG environment variable.
+
Multiple Kubernetes config files can be provided using the separator ';' on Windows platforms or ':' on other platforms.
The kubernetes configuration can be provided as dictionary. This feature requires a python kubernetes client version >= 17.17.0. Added in version 2.2.0.
@@ -180,6 +216,25 @@ Parameters
Use to specify namespace for deployments to be deleted.
+ + +
+ no_proxy + +
+ string +
+
added in 2.3.0
+ + + + +
The comma separated list of hosts/domains/IP/CIDR that shouldn't go through proxy. Can also be specified via K8S_AUTH_NO_PROXY environment variable.
+
Please note that this module does not pick up typical proxy settings from the environment (e.g. NO_PROXY).
+
This feature requires kubernetes>=19.15.0. With an older kubernetes library the request fails even if no_proxy is set correctly.
+
Example value: "localhost,.local,.example.com,127.0.0.1,127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"
+ +
@@ -382,7 +437,7 @@ Examples - name: Prune orphans deployments, keep younger than 2hours community.okd.openshift_adm_prune_deployments: - orphans: True + orphans: true keep_younger_than: 120 diff --git a/ansible_collections/community/okd/docs/community.okd.openshift_adm_prune_images_module.rst b/ansible_collections/community/okd/docs/community.okd.openshift_adm_prune_images_module.rst index 08fd357a3..9978b967c 100644 --- a/ansible_collections/community/okd/docs/community.okd.openshift_adm_prune_images_module.rst +++ b/ansible_collections/community/okd/docs/community.okd.openshift_adm_prune_images_module.rst @@ -177,6 +177,41 @@ Parameters
As a result an image may be incorrectly deleted as unused.
+ + +
+ impersonate_groups + +
+ list + / elements=string +
+
added in 2.3.0
+ + + + +
Group(s) to impersonate for the operation.
+
Can also be specified via the K8S_AUTH_IMPERSONATE_GROUPS environment variable. Example: Group1,Group2
+ + + + +
+ impersonate_user + +
+ string +
+
added in 2.3.0
+ + + + +
Username to impersonate for the operation.
+
Can also be specified via the K8S_AUTH_IMPERSONATE_USER environment variable.
+ +
@@ -205,6 +240,7 @@ Parameters
Path to an existing Kubernetes config file. If not provided, and no other connection options are provided, the Kubernetes client will attempt to load the default configuration file from ~/.kube/config. Can also be specified via K8S_AUTH_KUBECONFIG environment variable.
+
Multiple Kubernetes config files can be provided using the separator ';' on Windows platforms or ':' on other platforms.
The kubernetes configuration can be provided as dictionary. This feature requires a python kubernetes client version >= 17.17.0. Added in version 2.2.0.
@@ -223,6 +259,25 @@ Parameters
Use to specify namespace for objects.
+ + +
+ no_proxy + +
+ string +
+
added in 2.3.0
+ + + + +
The comma separated list of hosts/domains/IP/CIDR that shouldn't go through proxy. Can also be specified via K8S_AUTH_NO_PROXY environment variable.
+
Please note that this module does not pick up typical proxy settings from the environment (e.g. NO_PROXY).
+
This feature requires kubernetes>=19.15.0. With an older kubernetes library the request fails even if no_proxy is set correctly.
+
Example value: "localhost,.local,.example.com,127.0.0.1,127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"
+ +
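A minimal sketch (the retention window is an arbitrary illustration):

.. code-block:: yaml

    - name: Prune unreferenced images, keeping anything younger than an hour
      community.okd.openshift_adm_prune_images:
        keep_younger_than: 60  # minutes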
diff --git a/ansible_collections/community/okd/docs/community.okd.openshift_auth_module.rst b/ansible_collections/community/okd/docs/community.okd.openshift_auth_module.rst index 3619940ea..a6e77039e 100644 --- a/ansible_collections/community/okd/docs/community.okd.openshift_auth_module.rst +++ b/ansible_collections/community/okd/docs/community.okd.openshift_auth_module.rst @@ -171,37 +171,40 @@ Examples .. code-block:: yaml - - hosts: localhost + - name: Example Playbook + hosts: localhost module_defaults: - group/k8s: + group/community.okd.okd: host: https://k8s.example.com/ ca_cert: ca.pem tasks: - - block: - # It's good practice to store login credentials in a secure vault and not - # directly in playbooks. - - include_vars: openshift_passwords.yml - - - name: Log in (obtain access token) - community.okd.openshift_auth: - username: admin - password: "{{ openshift_admin_password }}" - register: openshift_auth_results - - # Previous task provides the token/api_key, while all other parameters - # are taken from module_defaults - - name: Get a list of all pods from any namespace - kubernetes.core.k8s_info: - api_key: "{{ openshift_auth_results.openshift_auth.api_key }}" - kind: Pod - register: pod_list - - always: - - name: If login succeeded, try to log out (revoke access token) - when: openshift_auth_results.openshift_auth.api_key is defined - community.okd.openshift_auth: - state: absent - api_key: "{{ openshift_auth_results.openshift_auth.api_key }}" + - name: Authenticate to OpenShift cluster and gell a list of all pods from any namespace + block: + # It's good practice to store login credentials in a secure vault and not + # directly in playbooks. + - name: Include 'openshift_passwords.yml' + ansible.builtin.include_vars: openshift_passwords.yml + + - name: Log in (obtain access token) + community.okd.openshift_auth: + username: admin + password: "{{ openshift_admin_password }}" + register: openshift_auth_results + + # Previous task provides the token/api_key, while all other parameters + # are taken from module_defaults + - name: Get a list of all pods from any namespace + kubernetes.core.k8s_info: + api_key: "{{ openshift_auth_results.openshift_auth.api_key }}" + kind: Pod + register: pod_list + + always: + - name: If login succeeded, try to log out (revoke access token) + when: openshift_auth_results.openshift_auth.api_key is defined + community.okd.openshift_auth: + state: absent + api_key: "{{ openshift_auth_results.openshift_auth.api_key }}" diff --git a/ansible_collections/community/okd/docs/community.okd.openshift_import_image_module.rst b/ansible_collections/community/okd/docs/community.okd.openshift_import_image_module.rst index 920c8405b..c1bafd173 100644 --- a/ansible_collections/community/okd/docs/community.okd.openshift_import_image_module.rst +++ b/ansible_collections/community/okd/docs/community.okd.openshift_import_image_module.rst @@ -157,6 +157,41 @@ Parameters
Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable.
+ + +
+ impersonate_groups + +
+ list + / elements=string +
+
added in 2.3.0
+ + + + +
Group(s) to impersonate for the operation.
+
Can also be specified via the K8S_AUTH_IMPERSONATE_GROUPS environment variable. Example: Group1,Group2
+ + + + +
+ impersonate_user + +
+ string +
+
added in 2.3.0
+ + + + +
Username to impersonate for the operation.
+
Can also be specified via the K8S_AUTH_IMPERSONATE_USER environment variable.
+ +
@@ -170,6 +205,7 @@ Parameters
Path to an existing Kubernetes config file. If not provided, and no other connection options are provided, the Kubernetes client will attempt to load the default configuration file from ~/.kube/config. Can also be specified via K8S_AUTH_KUBECONFIG environment variable.
+
Multiple Kubernetes config files can be provided using the separator ';' on Windows platforms or ':' on other platforms.
The kubernetes configuration can be provided as dictionary. This feature requires a python kubernetes client version >= 17.17.0. Added in version 2.2.0.
@@ -206,6 +242,25 @@ Parameters
Use to specify namespace for image stream to create/update.
+ + +
+ no_proxy + +
+ string +
+
added in 2.3.0
+ + + + +
The comma separated list of hosts/domains/IP/CIDR that shouldn't go through proxy. Can also be specified via K8S_AUTH_NO_PROXY environment variable.
+
Please note that this module does not pick up typical proxy settings from the environment (e.g. NO_PROXY).
+
This feature requires kubernetes>=19.15.0. With an older kubernetes library the request fails even if no_proxy is set correctly.
+
Example value: "localhost,.local,.example.com,127.0.0.1,127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"
+ +
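A minimal sketch (the namespace and image stream name are invented):

.. code-block:: yaml

    - name: Re-import the latest tag metadata for an existing image stream
      community.okd.openshift_import_image:
        namespace: testing
        name: mystream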
diff --git a/ansible_collections/community/okd/docs/community.okd.openshift_inventory.rst b/ansible_collections/community/okd/docs/community.okd.openshift_inventory.rst index 57527f454..9015fb09f 100644 --- a/ansible_collections/community/okd/docs/community.okd.openshift_inventory.rst +++ b/ansible_collections/community/okd/docs/community.okd.openshift_inventory.rst @@ -13,6 +13,15 @@ community.okd.openshift :local: :depth: 1 +DEPRECATED +---------- +:Removed in collection release after +:Why: As discussed in https://github.com/ansible-collections/kubernetes.core/issues/31, we decided to +remove the openshift inventory plugin in release 4.0.0. + +:Alternative: Use :ref:`kubernetes.core.k8s_info ` and :ref:`ansible.builtin.add_host ` instead. + + Synopsis -------- @@ -320,24 +329,24 @@ Examples # File must be named openshift.yaml or openshift.yml - # Authenticate with token, and return all pods and services for all namespaces - plugin: community.okd.openshift - connections: - - host: https://192.168.64.4:8443 - api_key: xxxxxxxxxxxxxxxx - verify_ssl: false + - name: Authenticate with token, and return all pods and services for all namespaces + plugin: community.okd.openshift + connections: + - host: https://192.168.64.4:8443 + api_key: xxxxxxxxxxxxxxxx + verify_ssl: false - # Use default config (~/.kube/config) file and active context, and return objects for a specific namespace - plugin: community.okd.openshift - connections: - - namespaces: - - testing + - name: Use default config (~/.kube/config) file and active context, and return objects for a specific namespace + plugin: community.okd.openshift + connections: + - namespaces: + - testing - # Use a custom config file, and a specific context. - plugin: community.okd.openshift - connections: - - kubeconfig: /path/to/config - context: 'awx/192-168-64-4:8443/developer' + - name: Use a custom config file, and a specific context. + plugin: community.okd.openshift + connections: + - kubeconfig: /path/to/config + context: 'awx/192-168-64-4:8443/developer' @@ -346,10 +355,14 @@ Status ------ +- This inventory will be removed in version 4.0.0. *[deprecated]* +- For more information see `DEPRECATED`_. + + Authors ~~~~~~~ -- Chris Houseknecht <@chouseknecht> +- Chris Houseknecht (@chouseknecht) .. hint:: diff --git a/ansible_collections/community/okd/docs/community.okd.openshift_process_module.rst b/ansible_collections/community/okd/docs/community.okd.openshift_process_module.rst index 7de7e8c3a..9ccc70221 100644 --- a/ansible_collections/community/okd/docs/community.okd.openshift_process_module.rst +++ b/ansible_collections/community/okd/docs/community.okd.openshift_process_module.rst @@ -137,6 +137,41 @@ Parameters
Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable.
+ + +
+ impersonate_groups + +
+ list + / elements=string +
+
added in 2.3.0
+ + + + +
Group(s) to impersonate for the operation.
+
Can also be specified via the K8S_AUTH_IMPERSONATE_GROUPS environment variable. Example: Group1,Group2
+ + + + +
+ impersonate_user + +
+ string +
+
added in 2.3.0
+ + + + +
Username to impersonate for the operation.
+
Can also be specified via the K8S_AUTH_IMPERSONATE_USER environment variable.
+ +
@@ -150,6 +185,7 @@ Parameters
Path to an existing Kubernetes config file. If not provided, and no other connection options are provided, the Kubernetes client will attempt to load the default configuration file from ~/.kube/config. Can also be specified via K8S_AUTH_KUBECONFIG environment variable.
+
Multiple Kubernetes config files can be provided using the separator ';' on Windows platforms or ':' on other platforms.
The kubernetes configuration can be provided as dictionary. This feature requires a python kubernetes client version >= 17.17.0. Added in version 2.2.0.
@@ -192,7 +228,7 @@ Parameters namespace_target
- - + string
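The split between the two namespace options, sketched with invented names (namespace locates the Template, namespace_target receives the rendered objects):

.. code-block:: yaml

    - name: Process a template from 'openshift' and create the result in 'default'
      community.okd.openshift_process:
        name: simple-example
        namespace: openshift       # where the Template object lives
        namespace_target: default  # where the rendered objects are created
        state: present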
@@ -202,6 +238,25 @@ Parameters
Only used when state is present or absent.
+ + +
+ no_proxy + +
+ string +
+
added in 2.3.0
+ + + + +
The comma separated list of hosts/domains/IP/CIDR that shouldn't go through proxy. Can also be specified via K8S_AUTH_NO_PROXY environment variable.
+
Please note that this module does not pick up typical proxy settings from the environment (e.g. NO_PROXY).
+
This feature requires kubernetes>=19.15.0. With an older kubernetes library the request fails even if no_proxy is set correctly.
+
Example value: "localhost,.local,.example.com,127.0.0.1,127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"
+ +
@@ -389,6 +444,7 @@ Parameters
Provide a path to a file containing a valid YAML definition of an object or objects to be created or updated. Mutually exclusive with resource_definition. NOTE: kind, api_version, name, and namespace will be overwritten by corresponding values found in the configuration read in from the src file.
Reads from the local file system. To read from the Ansible controller's file system, including vaulted files, use the file lookup plugin or template lookup plugin, combined with the from_yaml filter, and pass the result to resource_definition. See Examples below.
+
The URL to manifest files that can be used to create the resource. Added in version 2.4.0.
Mutually exclusive with template in case of kubernetes.core.k8s module.
@@ -616,8 +672,8 @@ Examples community.okd.k8s: namespace: default definition: '{{ item }}' - wait: yes - apply: yes + wait: true + apply: true loop: '{{ result.resources }}' - name: Process a template with parameters from an env file and create the resources @@ -627,7 +683,7 @@ Examples namespace_target: default parameter_file: 'files/nginx.env' state: present - wait: yes + wait: true - name: Process a local template and create the resources community.okd.openshift_process: @@ -642,7 +698,7 @@ Examples parameter_file: files/example.env namespace_target: default state: absent - wait: yes + wait: true diff --git a/ansible_collections/community/okd/docs/community.okd.openshift_registry_info_module.rst b/ansible_collections/community/okd/docs/community.okd.openshift_registry_info_module.rst index f556d0f64..563678590 100644 --- a/ansible_collections/community/okd/docs/community.okd.openshift_registry_info_module.rst +++ b/ansible_collections/community/okd/docs/community.okd.openshift_registry_info_module.rst @@ -156,6 +156,41 @@ Parameters
Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable.
+ + +
+ impersonate_groups + +
+ list + / elements=string +
+
added in 2.3.0
+ + + + +
Group(s) to impersonate for the operation.
+
Can also be specified via the K8S_AUTH_IMPERSONATE_GROUPS environment variable. Example: Group1,Group2
+ + + + +
+ impersonate_user + +
+ string +
+
added in 2.3.0
+ + + + +
Username to impersonate for the operation.
+
Can also be specified via the K8S_AUTH_IMPERSONATE_USER environment variable.
+ +
@@ -169,9 +204,29 @@ Parameters
Path to an existing Kubernetes config file. If not provided, and no other connection options are provided, the Kubernetes client will attempt to load the default configuration file from ~/.kube/config. Can also be specified via K8S_AUTH_KUBECONFIG environment variable.
+
Multiple Kubernetes config files can be provided using the separator ';' on Windows platforms or ':' on other platforms.
The kubernetes configuration can be provided as dictionary. This feature requires a python kubernetes client version >= 17.17.0. Added in version 2.2.0.
+ + +
+ no_proxy + +
+ string +
+
added in 2.3.0
+ + + + +
The comma separated list of hosts/domains/IP/CIDR that shouldn't go through proxy. Can also be specified via K8S_AUTH_NO_PROXY environment variable.
+
Please note that this module does not pick up typical proxy settings from the environment (e.g. NO_PROXY).
+
This feature requires kubernetes>=19.15.0. With an older kubernetes library the request fails even if no_proxy is set correctly.
+
Example value: "localhost,.local,.example.com,127.0.0.1,127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"
+ +
@@ -356,7 +411,7 @@ Examples # Read registry integrated information and attempt to contact using local client. - name: Attempt to contact integrated registry using local client community.okd.openshift_registry_info: - check: yes + check: true diff --git a/ansible_collections/community/okd/docs/community.okd.openshift_route_module.rst b/ansible_collections/community/okd/docs/community.okd.openshift_route_module.rst index fc62623c5..4e939df8d 100644 --- a/ansible_collections/community/okd/docs/community.okd.openshift_route_module.rst +++ b/ansible_collections/community/okd/docs/community.okd.openshift_route_module.rst @@ -187,6 +187,41 @@ Parameters
The hostname for the Route.
+ + +
+ impersonate_groups + +
+ list + / elements=string +
+
added in 2.3.0
+ + + + +
Group(s) to impersonate for the operation.
+
Can also be specified via the K8S_AUTH_IMPERSONATE_GROUPS environment variable. Example: Group1,Group2
+ + + + +
+ impersonate_user + +
+ string +
+
added in 2.3.0
+ + + + +
Username to impersonate for the operation.
+
Can also be specified via the K8S_AUTH_IMPERSONATE_USER environment variable.
+ +
@@ -200,6 +235,7 @@ Parameters
Path to an existing Kubernetes config file. If not provided, and no other connection options are provided, the Kubernetes client will attempt to load the default configuration file from ~/.kube/config. Can also be specified via K8S_AUTH_KUBECONFIG environment variable.
+
Multiple Kubernetes config files can be provided using the separator ';' on Windows platforms or ':' on other platforms.
The kubernetes configuration can be provided as dictionary. This feature requires a python kubernetes client version >= 17.17.0. Added in version 2.2.0.
@@ -252,6 +288,25 @@ Parameters
The Route will be created in this namespace as well.
+ + +
+ no_proxy + +
+ string +
+
added in 2.3.0
+ + + + +
The comma separated list of hosts/domains/IP/CIDR that shouldn't go through proxy. Can also be specified via K8S_AUTH_NO_PROXY environment variable.
+
Please note that this module does not pick up typical proxy settings from the environment (e.g. NO_PROXY).
+
This feature requires kubernetes>=19.15.0. With an older kubernetes library the request fails even if no_proxy is set correctly.
+
Example value: "localhost,.local,.example.com,127.0.0.1,127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"
+ +
@@ -801,10 +856,10 @@ Examples app: hello-kubernetes spec: containers: - - name: hello-kubernetes - image: paulbouwer/hello-kubernetes:1.8 - ports: - - containerPort: 8080 + - name: hello-kubernetes + image: paulbouwer/hello-kubernetes:1.8 + ports: + - containerPort: 8080 - name: Create Service for the hello-world deployment community.okd.k8s: @@ -816,8 +871,8 @@ Examples namespace: default spec: ports: - - port: 80 - targetPort: 8080 + - port: 80 + targetPort: 8080 selector: app: hello-kubernetes diff --git a/ansible_collections/community/okd/meta/runtime.yml b/ansible_collections/community/okd/meta/runtime.yml index e1ab57a85..533a03020 100644 --- a/ansible_collections/community/okd/meta/runtime.yml +++ b/ansible_collections/community/okd/meta/runtime.yml @@ -1,5 +1,5 @@ --- -requires_ansible: '>=2.9.17' +requires_ansible: '>=2.14.0' action_groups: okd: - k8s @@ -17,6 +17,13 @@ plugin_routing: modules: k8s_auth: redirect: community.okd.openshift_auth + inventory: + openshift: + deprecation: + removal_version: 4.0.0 + warning_text: >- + The openshift inventory plugin has been deprecated and + will be removed in release 4.0.0. action: k8s: redirect: kubernetes.core.k8s_info diff --git a/ansible_collections/community/okd/molecule/default/converge.yml b/ansible_collections/community/okd/molecule/default/converge.yml index 7fe9e8209..fb45006c3 100644 --- a/ansible_collections/community/okd/molecule/default/converge.yml +++ b/ansible_collections/community/okd/molecule/default/converge.yml @@ -21,16 +21,13 @@ debug: var: output - - name: Create deployment config + - name: Create deployment community.okd.k8s: state: present name: hello-world namespace: testing definition: '{{ okd_dc_template }}' wait: yes - wait_condition: - type: Available - status: True vars: k8s_pod_name: hello-world k8s_pod_image: python @@ -71,19 +68,12 @@ namespace: '{{ namespace }}' definition: '{{ okd_imagestream_template }}' - - name: Create DeploymentConfig to reference ImageStream - community.okd.k8s: - name: '{{ k8s_pod_name }}' - namespace: '{{ namespace }}' - definition: '{{ okd_dc_template }}' - vars: - k8s_pod_name: is-idempotent-dc - - name: Create Deployment to reference ImageStream community.okd.k8s: name: '{{ k8s_pod_name }}' namespace: '{{ namespace }}' definition: '{{ k8s_deployment_template | combine(metadata) }}' + wait: true vars: k8s_pod_annotations: "alpha.image.policy.openshift.io/resolve-names": "*" diff --git a/ansible_collections/community/okd/molecule/default/files/pod-template.yaml b/ansible_collections/community/okd/molecule/default/files/pod-template.yaml index ac388ad67..3a1c8f1b8 100644 --- a/ansible_collections/community/okd/molecule/default/files/pod-template.yaml +++ b/ansible_collections/community/okd/molecule/default/files/pod-template.yaml @@ -10,14 +10,14 @@ objects: name: "Pod-${{ NAME }}" spec: containers: - - args: - - /bin/sh - - -c - - while true; do echo $(date); sleep 15; done - image: python:3.7-alpine - imagePullPolicy: Always - name: python + - args: + - /bin/sh + - -c + - while true; do echo $(date); sleep 15; done + image: python:3.7-alpine + imagePullPolicy: Always + name: python parameters: - - name: NAME + - name: NAME description: trailing name of the pod required: true diff --git a/ansible_collections/community/okd/molecule/default/files/simple-template.yaml b/ansible_collections/community/okd/molecule/default/files/simple-template.yaml index 29c85b9cd..c8270f776 100644 --- a/ansible_collections/community/okd/molecule/default/files/simple-template.yaml +++ 
b/ansible_collections/community/okd/molecule/default/files/simple-template.yaml @@ -13,22 +13,22 @@ metadata: tags: quickstart,examples name: simple-example objects: -- apiVersion: v1 - kind: ConfigMap - metadata: - annotations: - description: Big example - name: ${NAME} - data: - content: "${CONTENT}" + - apiVersion: v1 + kind: ConfigMap + metadata: + annotations: + description: Big example + name: ${NAME} + data: + content: "${CONTENT}" parameters: -- description: The name assigned to the ConfigMap - displayName: Name - name: NAME - required: true - value: example -- description: The value for the content key of the configmap - displayName: Content - name: CONTENT - required: true - value: '' + - description: The name assigned to the ConfigMap + displayName: Name + name: NAME + required: true + value: example + - description: The value for the content key of the configmap + displayName: Content + name: CONTENT + required: true + value: '' diff --git a/ansible_collections/community/okd/molecule/default/molecule.yml b/ansible_collections/community/okd/molecule/default/molecule.yml index 43407bd26..7ca7e2c5b 100644 --- a/ansible_collections/community/okd/molecule/default/molecule.yml +++ b/ansible_collections/community/okd/molecule/default/molecule.yml @@ -4,7 +4,7 @@ dependency: options: requirements-file: requirements.yml driver: - name: delegated + name: default platforms: - name: cluster groups: @@ -17,9 +17,6 @@ provisioner: config_options: inventory: enable_plugins: community.okd.openshift - lint: | - set -e - ansible-lint inventory: hosts: plugin: community.okd.openshift @@ -34,14 +31,10 @@ provisioner: ANSIBLE_COLLECTIONS_PATHS: ${OVERRIDE_COLLECTION_PATH:-$MOLECULE_PROJECT_DIRECTORY} verifier: name: ansible - lint: | - set -e - ansible-lint scenario: name: default test_sequence: - dependency - - lint - syntax - prepare - converge diff --git a/ansible_collections/community/okd/molecule/default/prepare.yml b/ansible_collections/community/okd/molecule/default/prepare.yml index f155ec1d4..0d0361ab2 100644 --- a/ansible_collections/community/okd/molecule/default/prepare.yml +++ b/ansible_collections/community/okd/molecule/default/prepare.yml @@ -37,12 +37,12 @@ name: cluster spec: identityProviders: - - name: htpasswd_provider - mappingMethod: claim - type: HTPasswd - htpasswd: - fileData: - name: htpass-secret + - name: htpasswd_provider + mappingMethod: claim + type: HTPasswd + htpasswd: + fileData: + name: htpass-secret - name: Create ClusterRoleBinding for test user community.okd.k8s: diff --git a/ansible_collections/community/okd/molecule/default/roles/openshift_adm_groups/library/openshift_ldap_entry_info.py b/ansible_collections/community/okd/molecule/default/roles/openshift_adm_groups/library/openshift_ldap_entry_info.py index ba49f724d..c5bde3e5f 100644 --- a/ansible_collections/community/okd/molecule/default/roles/openshift_adm_groups/library/openshift_ldap_entry_info.py +++ b/ansible_collections/community/okd/molecule/default/roles/openshift_adm_groups/library/openshift_ldap_entry_info.py @@ -89,6 +89,7 @@ def execute(): ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER) connection = ldap.initialize(module.params['server_uri']) + connection.set_option(ldap.OPT_REFERRALS, 0) try: connection.simple_bind_s(module.params['bind_dn'], module.params['bind_pw']) except ldap.LDAPError as e: diff --git a/ansible_collections/community/okd/molecule/default/roles/openshift_adm_groups/tasks/activeDirectory.yml 
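simple-template.yaml above is an OpenShift Template exposing NAME and CONTENT parameters; one plausible way to instantiate it from a play, assuming the parameters and namespace_target options of community.okd.openshift_process, is:

- name: Instantiate the simple-example template with overridden parameters
  community.okd.openshift_process:
    definition: "{{ lookup('file', 'files/simple-template.yaml') | from_yaml }}"
    parameters:
      NAME: demo-config
      CONTENT: "rendered by openshift_process"
    namespace_target: testing     # namespace the rendered ConfigMap lands in
    state: present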
b/ansible_collections/community/okd/molecule/default/roles/openshift_adm_groups/tasks/activeDirectory.yml index da99f324e..78131a876 100644 --- a/ansible_collections/community/okd/molecule/default/roles/openshift_adm_groups/tasks/activeDirectory.yml +++ b/ansible_collections/community/okd/molecule/default/roles/openshift_adm_groups/tasks/activeDirectory.yml @@ -1,227 +1,227 @@ +--- - block: - - name: Get LDAP definition - set_fact: - ldap_entries: "{{ lookup('template', 'ad/definition.j2') | from_yaml }}" - - - name: Delete openshift groups if existing - community.okd.k8s: - state: absent - kind: Group - version: "user.openshift.io/v1" - name: "{{ item }}" - with_items: - - admins - - developers - - - name: Delete existing LDAP Entries - openshift_ldap_entry: - bind_dn: "{{ ldap_bind_dn }}" - bind_pw: "{{ ldap_bind_pw }}" - server_uri: "{{ ldap_server_uri }}" - dn: "{{ item.dn }}" - state: absent - with_items: "{{ ldap_entries.users + ldap_entries.units | reverse | list }}" - - - name: Create LDAP Entries - openshift_ldap_entry: - bind_dn: "{{ ldap_bind_dn }}" - bind_pw: "{{ ldap_bind_pw }}" - server_uri: "{{ ldap_server_uri }}" - dn: "{{ item.dn }}" - attributes: "{{ item.attr }}" - objectClass: "{{ item.class }}" - with_items: "{{ ldap_entries.units + ldap_entries.users }}" - - - name: Load test configurations - set_fact: - sync_config: "{{ lookup('template', 'ad/sync-config.j2') | from_yaml }}" - - - name: Synchronize Groups - community.okd.openshift_adm_groups_sync: - config: "{{ sync_config }}" - check_mode: yes - register: result - - - name: Validate Group going to be created - assert: - that: - - result is changed - - admins_group - - devs_group - - '"jane.smith@ansible.org" in {{ admins_group.users }}' - - '"jim.adams@ansible.org" in {{ admins_group.users }}' - - '"jordanbulls@ansible.org" in {{ devs_group.users }}' - - admins_group.users | length == 2 - - devs_group.users | length == 1 - vars: - admins_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'admins') | first }}" - devs_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'developers') | first }}" - - - - name: Synchronize Groups (Remove check_mode) - community.okd.openshift_adm_groups_sync: - config: "{{ sync_config }}" - register: result - - - name: Validate Group going to be created - assert: - that: - - result is changed - - - name: Read admins group - kubernetes.core.k8s_info: - kind: Group - version: "user.openshift.io/v1" - name: admins - register: result - - - name: Validate group was created - assert: - that: - - result.resources | length == 1 - - '"jane.smith@ansible.org" in {{ result.resources.0.users }}' - - '"jim.adams@ansible.org" in {{ result.resources.0.users }}' - - - name: Read developers group - kubernetes.core.k8s_info: - kind: Group - version: "user.openshift.io/v1" - name: developers - register: result - - - name: Validate group was created - assert: - that: - - result.resources | length == 1 - - '"jordanbulls@ansible.org" in {{ result.resources.0.users }}' - - - name: Define user dn to delete - set_fact: - user_to_delete: "cn=Jane,ou=engineers,ou=activeD,{{ ldap_root }}" - - - name: Delete 1 admin user - openshift_ldap_entry: - bind_dn: "{{ ldap_bind_dn }}" - bind_pw: "{{ ldap_bind_pw }}" - server_uri: "{{ ldap_server_uri }}" - dn: "{{ user_to_delete }}" - state: absent - - - name: Synchronize Openshift groups using allow_groups - community.okd.openshift_adm_groups_sync: - config: "{{ sync_config }}" - allow_groups: - - developers - type: openshift - register: 
openshift_sync - - - name: Validate that only developers group was sync - assert: - that: - - openshift_sync is changed - - openshift_sync.groups | length == 1 - - openshift_sync.groups.0.metadata.name == "developers" - - - name: Read admins group - kubernetes.core.k8s_info: - kind: Group - version: "user.openshift.io/v1" - name: admins - register: result - - - name: Validate admins group content has not changed - assert: - that: - - result.resources | length == 1 - - '"jane.smith@ansible.org" in {{ result.resources.0.users }}' - - '"jim.adams@ansible.org" in {{ result.resources.0.users }}' - - - name: Synchronize Openshift groups using deny_groups - community.okd.openshift_adm_groups_sync: - config: "{{ sync_config }}" - deny_groups: + - name: Get LDAP definition + set_fact: + ldap_entries: "{{ lookup('template', 'ad/definition.j2') | from_yaml }}" + + - name: Delete openshift groups if existing + community.okd.k8s: + state: absent + kind: Group + version: "user.openshift.io/v1" + name: "{{ item }}" + with_items: + - admins - developers - type: openshift - register: openshift_sync - - - name: Validate that only admins group was sync - assert: - that: - - openshift_sync is changed - - openshift_sync.groups | length == 1 - - openshift_sync.groups.0.metadata.name == "admins" - - - name: Read admins group - kubernetes.core.k8s_info: - kind: Group - version: "user.openshift.io/v1" - name: admins - register: result - - - name: Validate admins group contains only 1 user now - assert: - that: - - result.resources | length == 1 - - result.resources.0.users == ["jim.adams@ansible.org"] - - - name: Set users to delete (delete all developers users) - set_fact: - user_to_delete: "cn=Jordan,ou=engineers,ou=activeD,{{ ldap_root }}" - - - name: Delete 1 admin user - openshift_ldap_entry: - bind_dn: "{{ ldap_bind_dn }}" - bind_pw: "{{ ldap_bind_pw }}" - server_uri: "{{ ldap_server_uri }}" - dn: "{{ user_to_delete }}" - state: absent - - - name: Prune groups - community.okd.openshift_adm_groups_sync: - config: "{{ sync_config }}" - state: absent - register: result - - - name: Validate result is changed (only developers group be deleted) - assert: - that: - - result is changed - - result.groups | length == 1 - - - name: Get developers group info - kubernetes.core.k8s_info: - kind: Group - version: "user.openshift.io/v1" - name: developers - register: result - - - name: assert group was deleted - assert: - that: - - result.resources | length == 0 - - - name: Get admins group info - kubernetes.core.k8s_info: - kind: Group - version: "user.openshift.io/v1" - name: admins - register: result - - - name: assert group was not deleted - assert: - that: - - result.resources | length == 1 - - - name: Prune groups once again (idempotency) - community.okd.openshift_adm_groups_sync: - config: "{{ sync_config }}" - state: absent - register: result - - - name: Assert nothing was changed - assert: - that: - - result is not changed + + - name: Delete existing LDAP Entries + openshift_ldap_entry: + bind_dn: "{{ ldap_bind_dn }}" + bind_pw: "{{ ldap_bind_pw }}" + server_uri: "{{ ldap_server_uri }}" + dn: "{{ item.dn }}" + state: absent + with_items: "{{ ldap_entries.users + ldap_entries.units | reverse | list }}" + + - name: Create LDAP Entries + openshift_ldap_entry: + bind_dn: "{{ ldap_bind_dn }}" + bind_pw: "{{ ldap_bind_pw }}" + server_uri: "{{ ldap_server_uri }}" + dn: "{{ item.dn }}" + attributes: "{{ item.attr }}" + objectClass: "{{ item.class }}" + with_items: "{{ ldap_entries.units + ldap_entries.users }}" + + - name: 
Load test configurations + set_fact: + sync_config: "{{ lookup('template', 'ad/sync-config.j2') | from_yaml }}" + + - name: Synchronize Groups + community.okd.openshift_adm_groups_sync: + config: "{{ sync_config }}" + check_mode: yes + register: result + + - name: Validate Group going to be created + assert: + that: + - result is changed + - admins_group + - devs_group + - '"jane.smith@ansible.org" in {{ admins_group.users }}' + - '"jim.adams@ansible.org" in {{ admins_group.users }}' + - '"jordanbulls@ansible.org" in {{ devs_group.users }}' + - admins_group.users | length == 2 + - devs_group.users | length == 1 + vars: + admins_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'admins') | first }}" + devs_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'developers') | first }}" + + - name: Synchronize Groups (Remove check_mode) + community.okd.openshift_adm_groups_sync: + config: "{{ sync_config }}" + register: result + + - name: Validate Group going to be created + assert: + that: + - result is changed + + - name: Read admins group + kubernetes.core.k8s_info: + kind: Group + version: "user.openshift.io/v1" + name: admins + register: result + + - name: Validate group was created + assert: + that: + - result.resources | length == 1 + - '"jane.smith@ansible.org" in {{ result.resources.0.users }}' + - '"jim.adams@ansible.org" in {{ result.resources.0.users }}' + + - name: Read developers group + kubernetes.core.k8s_info: + kind: Group + version: "user.openshift.io/v1" + name: developers + register: result + + - name: Validate group was created + assert: + that: + - result.resources | length == 1 + - '"jordanbulls@ansible.org" in {{ result.resources.0.users }}' + + - name: Define user dn to delete + set_fact: + user_to_delete: "cn=Jane,ou=engineers,ou=activeD,{{ ldap_root }}" + + - name: Delete 1 admin user + openshift_ldap_entry: + bind_dn: "{{ ldap_bind_dn }}" + bind_pw: "{{ ldap_bind_pw }}" + server_uri: "{{ ldap_server_uri }}" + dn: "{{ user_to_delete }}" + state: absent + + - name: Synchronize Openshift groups using allow_groups + community.okd.openshift_adm_groups_sync: + config: "{{ sync_config }}" + allow_groups: + - developers + type: openshift + register: openshift_sync + + - name: Validate that only developers group was sync + assert: + that: + - openshift_sync is changed + - openshift_sync.groups | length == 1 + - openshift_sync.groups.0.metadata.name == "developers" + + - name: Read admins group + kubernetes.core.k8s_info: + kind: Group + version: "user.openshift.io/v1" + name: admins + register: result + + - name: Validate admins group content has not changed + assert: + that: + - result.resources | length == 1 + - '"jane.smith@ansible.org" in {{ result.resources.0.users }}' + - '"jim.adams@ansible.org" in {{ result.resources.0.users }}' + + - name: Synchronize Openshift groups using deny_groups + community.okd.openshift_adm_groups_sync: + config: "{{ sync_config }}" + deny_groups: + - developers + type: openshift + register: openshift_sync + + - name: Validate that only admins group was sync + assert: + that: + - openshift_sync is changed + - openshift_sync.groups | length == 1 + - openshift_sync.groups.0.metadata.name == "admins" + + - name: Read admins group + kubernetes.core.k8s_info: + kind: Group + version: "user.openshift.io/v1" + name: admins + register: result + + - name: Validate admins group contains only 1 user now + assert: + that: + - result.resources | length == 1 + - result.resources.0.users == ["jim.adams@ansible.org"] + + - 
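The sync_config fact in these tasks is rendered from ad/sync-config.j2, which this patch does not include; for orientation, an LDAPSyncConfig using the activeDirectory schema, built from the same role variables, would plausibly look like:

kind: LDAPSyncConfig
apiVersion: v1
url: "{{ ldap_server_uri }}"
bindDN: "{{ ldap_bind_dn }}"
bindPassword: "{{ ldap_bind_pw }}"
insecure: true
activeDirectory:
  usersQuery:
    baseDN: "ou=activeD,{{ ldap_root }}"   # matches the user DNs used in these tasks
    scope: sub
    derefAliases: never
    filter: (objectClass=inetOrgPerson)
  userNameAttributes: [mail]
  groupMembershipAttributes: [memberOf]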
name: Set users to delete (delete all developers users) + set_fact: + user_to_delete: "cn=Jordan,ou=engineers,ou=activeD,{{ ldap_root }}" + + - name: Delete 1 admin user + openshift_ldap_entry: + bind_dn: "{{ ldap_bind_dn }}" + bind_pw: "{{ ldap_bind_pw }}" + server_uri: "{{ ldap_server_uri }}" + dn: "{{ user_to_delete }}" + state: absent + + - name: Prune groups + community.okd.openshift_adm_groups_sync: + config: "{{ sync_config }}" + state: absent + register: result + + - name: Validate result is changed (only developers group be deleted) + assert: + that: + - result is changed + - result.groups | length == 1 + + - name: Get developers group info + kubernetes.core.k8s_info: + kind: Group + version: "user.openshift.io/v1" + name: developers + register: result + + - name: assert group was deleted + assert: + that: + - result.resources | length == 0 + + - name: Get admins group info + kubernetes.core.k8s_info: + kind: Group + version: "user.openshift.io/v1" + name: admins + register: result + + - name: assert group was not deleted + assert: + that: + - result.resources | length == 1 + + - name: Prune groups once again (idempotency) + community.okd.openshift_adm_groups_sync: + config: "{{ sync_config }}" + state: absent + register: result + + - name: Assert nothing was changed + assert: + that: + - result is not changed always: - name: Delete openshift groups if existing diff --git a/ansible_collections/community/okd/molecule/default/roles/openshift_adm_groups/tasks/augmentedActiveDirectory.yml b/ansible_collections/community/okd/molecule/default/roles/openshift_adm_groups/tasks/augmentedActiveDirectory.yml index f70d3bd8e..f089b33ad 100644 --- a/ansible_collections/community/okd/molecule/default/roles/openshift_adm_groups/tasks/augmentedActiveDirectory.yml +++ b/ansible_collections/community/okd/molecule/default/roles/openshift_adm_groups/tasks/augmentedActiveDirectory.yml @@ -1,166 +1,165 @@ +--- - block: - - name: Get LDAP definition - set_fact: - ldap_entries: "{{ lookup('template', 'augmented-ad/definition.j2') | from_yaml }}" - - - name: Delete openshift groups if existing - community.okd.k8s: - state: absent - kind: Group - version: "user.openshift.io/v1" - name: "{{ item }}" - with_items: - - banking - - insurance - - - name: Delete existing LDAP entries - openshift_ldap_entry: - bind_dn: "{{ ldap_bind_dn }}" - bind_pw: "{{ ldap_bind_pw }}" - server_uri: "{{ ldap_server_uri }}" - dn: "{{ item.dn }}" - state: absent - with_items: "{{ ldap_entries.users + ldap_entries.groups + ldap_entries.units | reverse | list }}" - - - name: Create LDAP Entries - openshift_ldap_entry: - bind_dn: "{{ ldap_bind_dn }}" - bind_pw: "{{ ldap_bind_pw }}" - server_uri: "{{ ldap_server_uri }}" - dn: "{{ item.dn }}" - attributes: "{{ item.attr }}" - objectClass: "{{ item.class }}" - with_items: "{{ ldap_entries.units + ldap_entries.groups + ldap_entries.users }}" - - - name: Load test configurations - set_fact: - sync_config: "{{ lookup('template', 'augmented-ad/sync-config.j2') | from_yaml }}" - - - name: Synchronize Groups - community.okd.openshift_adm_groups_sync: - config: "{{ sync_config }}" - check_mode: yes - register: result - - - name: Validate that 'banking' and 'insurance' groups were created - assert: - that: - - result is changed - - banking_group - - insurance_group - - '"james-allan@ansible.org" in {{ banking_group.users }}' - - '"gordon-kane@ansible.org" in {{ banking_group.users }}' - - '"alice-courtney@ansible.org" in {{ insurance_group.users }}' - - banking_group.users | length == 2 - - 
insurance_group.users | length == 1 - vars: - banking_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'banking') | first }}" - insurance_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'insurance') | first }}" - - - - name: Synchronize Groups (Remove check_mode) - community.okd.openshift_adm_groups_sync: - config: "{{ sync_config }}" - register: result - - - name: Validate Group going to be created - assert: - that: - - result is changed - - - name: Define facts for group to create - set_fact: - ldap_groups: - - name: banking - users: - - "james-allan@ansible.org" - - "gordon-kane@ansible.org" - - name: insurance - users: - - "alice-courtney@ansible.org" - - - - name: Read 'banking' openshift group - kubernetes.core.k8s_info: - kind: Group - version: "user.openshift.io/v1" - name: banking - register: result - - - name: Validate group info - assert: - that: - - result.resources | length == 1 - - '"james-allan@ansible.org" in {{ result.resources.0.users }}' - - '"gordon-kane@ansible.org" in {{ result.resources.0.users }}' - - - name: Read 'insurance' openshift group - kubernetes.core.k8s_info: - kind: Group - version: "user.openshift.io/v1" - name: insurance - register: result - - - name: Validate group info - assert: - that: - - result.resources | length == 1 - - 'result.resources.0.users == ["alice-courtney@ansible.org"]' - - - name: Delete employee from 'insurance' group - openshift_ldap_entry: - bind_dn: "{{ ldap_bind_dn }}" - bind_pw: "{{ ldap_bind_pw }}" - server_uri: "{{ ldap_server_uri }}" - dn: "cn=Alice,ou=employee,ou=augmentedAD,{{ ldap_root }}" - state: absent - - - name: Prune groups - community.okd.openshift_adm_groups_sync: - config: "{{ sync_config }}" - state: absent - register: result - - - name: Validate result is changed (only insurance group be deleted) - assert: - that: - - result is changed - - result.groups | length == 1 - - - name: Get 'insurance' openshift group info - kubernetes.core.k8s_info: - kind: Group - version: "user.openshift.io/v1" - name: insurance - register: result - - - name: assert group was deleted - assert: - that: - - result.resources | length == 0 - - - name: Get 'banking' openshift group info - kubernetes.core.k8s_info: - kind: Group - version: "user.openshift.io/v1" - name: banking - register: result - - - name: assert group was not deleted - assert: - that: - - result.resources | length == 1 - - - name: Prune groups once again (idempotency) - community.okd.openshift_adm_groups_sync: - config: "{{ sync_config }}" - state: absent - register: result - - - name: Assert no change was made - assert: - that: - - result is not changed + - name: Get LDAP definition + set_fact: + ldap_entries: "{{ lookup('template', 'augmented-ad/definition.j2') | from_yaml }}" + + - name: Delete openshift groups if existing + community.okd.k8s: + state: absent + kind: Group + version: "user.openshift.io/v1" + name: "{{ item }}" + with_items: + - banking + - insurance + + - name: Delete existing LDAP entries + openshift_ldap_entry: + bind_dn: "{{ ldap_bind_dn }}" + bind_pw: "{{ ldap_bind_pw }}" + server_uri: "{{ ldap_server_uri }}" + dn: "{{ item.dn }}" + state: absent + with_items: "{{ ldap_entries.users + ldap_entries.groups + ldap_entries.units | reverse | list }}" + + - name: Create LDAP Entries + openshift_ldap_entry: + bind_dn: "{{ ldap_bind_dn }}" + bind_pw: "{{ ldap_bind_pw }}" + server_uri: "{{ ldap_server_uri }}" + dn: "{{ item.dn }}" + attributes: "{{ item.attr }}" + objectClass: "{{ item.class }}" + with_items: "{{ 
ldap_entries.units + ldap_entries.groups + ldap_entries.users }}" + + - name: Load test configurations + set_fact: + sync_config: "{{ lookup('template', 'augmented-ad/sync-config.j2') | from_yaml }}" + + - name: Synchronize Groups + community.okd.openshift_adm_groups_sync: + config: "{{ sync_config }}" + check_mode: yes + register: result + + - name: Validate that 'banking' and 'insurance' groups were created + assert: + that: + - result is changed + - banking_group + - insurance_group + - '"james-allan@ansible.org" in {{ banking_group.users }}' + - '"gordon-kane@ansible.org" in {{ banking_group.users }}' + - '"alice-courtney@ansible.org" in {{ insurance_group.users }}' + - banking_group.users | length == 2 + - insurance_group.users | length == 1 + vars: + banking_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'banking') | first }}" + insurance_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'insurance') | first }}" + + - name: Synchronize Groups (Remove check_mode) + community.okd.openshift_adm_groups_sync: + config: "{{ sync_config }}" + register: result + + - name: Validate Group going to be created + assert: + that: + - result is changed + + - name: Define facts for group to create + set_fact: + ldap_groups: + - name: banking + users: + - "james-allan@ansible.org" + - "gordon-kane@ansible.org" + - name: insurance + users: + - "alice-courtney@ansible.org" + + - name: Read 'banking' openshift group + kubernetes.core.k8s_info: + kind: Group + version: "user.openshift.io/v1" + name: banking + register: result + + - name: Validate group info + assert: + that: + - result.resources | length == 1 + - '"james-allan@ansible.org" in {{ result.resources.0.users }}' + - '"gordon-kane@ansible.org" in {{ result.resources.0.users }}' + + - name: Read 'insurance' openshift group + kubernetes.core.k8s_info: + kind: Group + version: "user.openshift.io/v1" + name: insurance + register: result + + - name: Validate group info + assert: + that: + - result.resources | length == 1 + - 'result.resources.0.users == ["alice-courtney@ansible.org"]' + + - name: Delete employee from 'insurance' group + openshift_ldap_entry: + bind_dn: "{{ ldap_bind_dn }}" + bind_pw: "{{ ldap_bind_pw }}" + server_uri: "{{ ldap_server_uri }}" + dn: "cn=Alice,ou=employee,ou=augmentedAD,{{ ldap_root }}" + state: absent + + - name: Prune groups + community.okd.openshift_adm_groups_sync: + config: "{{ sync_config }}" + state: absent + register: result + + - name: Validate result is changed (only insurance group be deleted) + assert: + that: + - result is changed + - result.groups | length == 1 + + - name: Get 'insurance' openshift group info + kubernetes.core.k8s_info: + kind: Group + version: "user.openshift.io/v1" + name: insurance + register: result + + - name: assert group was deleted + assert: + that: + - result.resources | length == 0 + + - name: Get 'banking' openshift group info + kubernetes.core.k8s_info: + kind: Group + version: "user.openshift.io/v1" + name: banking + register: result + + - name: assert group was not deleted + assert: + that: + - result.resources | length == 1 + + - name: Prune groups once again (idempotency) + community.okd.openshift_adm_groups_sync: + config: "{{ sync_config }}" + state: absent + register: result + + - name: Assert no change was made + assert: + that: + - result is not changed always: - name: Delete openshift groups if existing diff --git a/ansible_collections/community/okd/molecule/default/roles/openshift_adm_groups/tasks/main.yml 
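The augmented Active Directory flavor exercised here differs from plain activeDirectory in that group records are first-class LDAP entries; a sync-config sketch for it, approximating the role's augmented-ad/sync-config.j2 (not part of this diff; OU names other than ou=employee are guesses), could be:

kind: LDAPSyncConfig
apiVersion: v1
url: "{{ ldap_server_uri }}"
bindDN: "{{ ldap_bind_dn }}"
bindPassword: "{{ ldap_bind_pw }}"
insecure: true
augmentedActiveDirectory:
  groupsQuery:
    baseDN: "ou=groups,ou=augmentedAD,{{ ldap_root }}"   # placeholder OU
    scope: sub
    derefAliases: never
  groupUIDAttribute: dn
  groupNameAttributes: [cn]
  usersQuery:
    baseDN: "ou=employee,ou=augmentedAD,{{ ldap_root }}"
    scope: sub
    derefAliases: never
  userNameAttributes: [mail]
  groupMembershipAttributes: [memberOf]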
b/ansible_collections/community/okd/molecule/default/roles/openshift_adm_groups/tasks/main.yml index 88bfd67f8..5dc79c1f0 100644 --- a/ansible_collections/community/okd/molecule/default/roles/openshift_adm_groups/tasks/main.yml +++ b/ansible_collections/community/okd/molecule/default/roles/openshift_adm_groups/tasks/main.yml @@ -1,5 +1,5 @@ --- -- name: Get cluster information +- name: Get cluster information kubernetes.core.k8s_cluster_info: register: info @@ -16,30 +16,29 @@ app: ldap spec: containers: - - name: ldap - image: bitnami/openldap - env: - - name: LDAP_ADMIN_USERNAME - value: "{{ ldap_admin_user }}" - - name: LDAP_ADMIN_PASSWORD - value: "{{ ldap_admin_password }}" - - name: LDAP_USERS - value: "ansible" - - name: LDAP_PASSWORDS - value: "ansible123" - - name: LDAP_ROOT - value: "{{ ldap_root }}" - ports: - - containerPort: 1389 + - name: ldap + image: bitnami/openldap + env: + - name: LDAP_ADMIN_USERNAME + value: "{{ ldap_admin_user }}" + - name: LDAP_ADMIN_PASSWORD + value: "{{ ldap_admin_password }}" + - name: LDAP_USERS + value: "ansible" + - name: LDAP_PASSWORDS + value: "ansible123" + - name: LDAP_ROOT + value: "{{ ldap_root }}" + ports: + - containerPort: 1389 + name: ldap-server register: pod_info -- name: Set Pod Internal IP - set_fact: - podIp: "{{ pod_info.result.status.podIP }}" - - name: Set LDAP Common facts set_fact: - ldap_server_uri: "ldap://{{ podIp }}:1389" + # we can use the Pod IP directly because the integration are running inside a Pod in the + # same openshift cluster + ldap_server_uri: "ldap://{{ pod_info.result.status.podIP }}:1389" ldap_bind_dn: "cn={{ ldap_admin_user }},{{ ldap_root }}" ldap_bind_pw: "{{ ldap_admin_password }}" @@ -53,8 +52,10 @@ bind_pw: "{{ ldap_bind_pw }}" dn: "ou=users,{{ ldap_root }}" server_uri: "{{ ldap_server_uri }}" - # ignore_errors: true - # register: ping_ldap + register: test_ldap + retries: 10 + delay: 5 + until: test_ldap is not failed - include_tasks: "tasks/python-ldap-not-installed.yml" - include_tasks: "tasks/rfc2307.yml" diff --git a/ansible_collections/community/okd/molecule/default/roles/openshift_adm_groups/tasks/python-ldap-not-installed.yml b/ansible_collections/community/okd/molecule/default/roles/openshift_adm_groups/tasks/python-ldap-not-installed.yml index a79af51c2..857ff4903 100644 --- a/ansible_collections/community/okd/molecule/default/roles/openshift_adm_groups/tasks/python-ldap-not-installed.yml +++ b/ansible_collections/community/okd/molecule/default/roles/openshift_adm_groups/tasks/python-ldap-not-installed.yml @@ -1,3 +1,4 @@ +--- - block: - name: Create temp directory tempfile: diff --git a/ansible_collections/community/okd/molecule/default/roles/openshift_adm_groups/tasks/rfc2307.yml b/ansible_collections/community/okd/molecule/default/roles/openshift_adm_groups/tasks/rfc2307.yml index 7660bf625..749dfdae3 100644 --- a/ansible_collections/community/okd/molecule/default/roles/openshift_adm_groups/tasks/rfc2307.yml +++ b/ansible_collections/community/okd/molecule/default/roles/openshift_adm_groups/tasks/rfc2307.yml @@ -1,459 +1,460 @@ +--- - block: - - name: Get LDAP definition - set_fact: - ldap_resources: "{{ lookup('template', 'rfc2307/definition.j2') | from_yaml }}" - - - name: Delete openshift groups if existing - community.okd.k8s: - state: absent - kind: Group - version: "user.openshift.io/v1" - name: "{{ item }}" - with_items: - - admins - - engineers - - developers - - - name: Delete existing LDAP entries - openshift_ldap_entry: - bind_dn: "{{ ldap_bind_dn }}" - bind_pw: "{{ 
ldap_bind_pw }}" - server_uri: "{{ ldap_server_uri }}" - dn: "{{ item.dn }}" - state: absent - with_items: "{{ ldap_resources.users + ldap_resources.groups + ldap_resources.units | reverse | list }}" - - - name: Create LDAP units - openshift_ldap_entry: - bind_dn: "{{ ldap_bind_dn }}" - bind_pw: "{{ ldap_bind_pw }}" - server_uri: "{{ ldap_server_uri }}" - dn: "{{ item.dn }}" - attributes: "{{ item.attr }}" - objectClass: "{{ item.class }}" - with_items: "{{ ldap_resources.units }}" - - - name: Create LDAP Groups - openshift_ldap_entry: - bind_dn: "{{ ldap_bind_dn }}" - bind_pw: "{{ ldap_bind_pw }}" - server_uri: "{{ ldap_server_uri }}" - dn: "{{ item.dn }}" - attributes: "{{ item.attr }}" - objectClass: "{{ item.class }}" - with_items: "{{ ldap_resources.groups }}" - - - name: Create LDAP users - openshift_ldap_entry: - bind_dn: "{{ ldap_bind_dn }}" - bind_pw: "{{ ldap_bind_pw }}" - server_uri: "{{ ldap_server_uri }}" - dn: "{{ item.dn }}" - attributes: "{{ item.attr }}" - objectClass: "{{ item.class }}" - with_items: "{{ ldap_resources.users }}" - - - name: Load test configurations - set_fact: - configs: "{{ lookup('template', 'rfc2307/sync-config.j2') | from_yaml }}" - - - name: Synchronize Groups - community.okd.openshift_adm_groups_sync: - config: "{{ configs.simple }}" - check_mode: yes - register: result - - - name: Validate Group going to be created - assert: - that: - - result is changed - - admins_group - - devs_group - - '"jane.smith@ansible.org" in {{ admins_group.users }}' - - '"jim.adams@ansible.org" in {{ devs_group.users }}' - - '"jordanbulls@ansible.org" in {{ devs_group.users }}' - - admins_group.users | length == 1 - - devs_group.users | length == 2 - vars: - admins_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'admins') | first }}" - devs_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'developers') | first }}" - - - name: Synchronize Groups - User defined mapping - community.okd.openshift_adm_groups_sync: - config: "{{ configs.user_defined }}" - check_mode: yes - register: result - - - name: Validate Group going to be created - assert: - that: - - result is changed - - admins_group - - devs_group - - '"jane.smith@ansible.org" in {{ admins_group.users }}' - - '"jim.adams@ansible.org" in {{ devs_group.users }}' - - '"jordanbulls@ansible.org" in {{ devs_group.users }}' - - admins_group.users | length == 1 - - devs_group.users | length == 2 - vars: - admins_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'ansible-admins') | first }}" - devs_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'ansible-devs') | first }}" - - - name: Synchronize Groups - Using dn for every query - community.okd.openshift_adm_groups_sync: - config: "{{ configs.dn_everywhere }}" - check_mode: yes - register: result - - - name: Validate Group going to be created - assert: - that: - - result is changed - - admins_group - - devs_group - - '"cn=Jane,ou=people,ou=rfc2307,{{ ldap_root }}" in {{ admins_group.users }}' - - '"cn=Jim,ou=people,ou=rfc2307,{{ ldap_root }}" in {{ devs_group.users }}' - - '"cn=Jordan,ou=people,ou=rfc2307,{{ ldap_root }}" in {{ devs_group.users }}' - - admins_group.users | length == 1 - - devs_group.users | length == 2 - vars: - admins_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'cn=admins,ou=groups,ou=rfc2307,' + ldap_root ) | first }}" - devs_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'cn=developers,ou=groups,ou=rfc2307,' + ldap_root ) | first }}" - - - name: 
Synchronize Groups - Partially user defined mapping - community.okd.openshift_adm_groups_sync: - config: "{{ configs.partially_user_defined }}" - check_mode: yes - register: result - - - name: Validate Group going to be created - assert: - that: - - result is changed - - admins_group - - devs_group - - '"jane.smith@ansible.org" in {{ admins_group.users }}' - - '"jim.adams@ansible.org" in {{ devs_group.users }}' - - '"jordanbulls@ansible.org" in {{ devs_group.users }}' - - admins_group.users | length == 1 - - devs_group.users | length == 2 - vars: - admins_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'ansible-admins') | first }}" - devs_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'developers') | first }}" - - - name: Delete Group 'engineers' if created before - community.okd.k8s: - state: absent - kind: Group - version: "user.openshift.io/v1" - name: 'engineers' - wait: yes - ignore_errors: yes - - - name: Synchronize Groups - Partially user defined mapping - community.okd.openshift_adm_groups_sync: - config: "{{ configs.out_scope }}" - check_mode: yes - register: result - ignore_errors: yes - - - name: Assert group sync failed due to non-existent member - assert: - that: - - result is failed - - result.msg.startswith("Entry not found for base='cn=Matthew,ou=people,ou=outrfc2307,{{ ldap_root }}'") - - - name: Define sync configuration with tolerateMemberNotFoundErrors - set_fact: - config_out_of_scope_tolerate_not_found: "{{ configs.out_scope | combine({'rfc2307': merge_rfc2307 })}}" - vars: - merge_rfc2307: "{{ configs.out_scope.rfc2307 | combine({'tolerateMemberNotFoundErrors': 'true'}) }}" - - - name: Synchronize Groups - Partially user defined mapping (tolerateMemberNotFoundErrors=true) - community.okd.openshift_adm_groups_sync: - config: "{{ config_out_of_scope_tolerate_not_found }}" - check_mode: yes - register: result - - - name: Assert group sync did not fail (tolerateMemberNotFoundErrors=true) - assert: - that: - - result is changed - - result.groups | length == 1 - - result.groups.0.metadata.name == 'engineers' - - result.groups.0.users == ['Abraham'] - - - name: Create Group 'engineers' - community.okd.k8s: - state: present - wait: yes - definition: + - name: Get LDAP definition + set_fact: + ldap_resources: "{{ lookup('template', 'rfc2307/definition.j2') | from_yaml }}" + + - name: Delete openshift groups if existing + community.okd.k8s: + state: absent kind: Group - apiVersion: "user.openshift.io/v1" - metadata: - name: engineers - users: [] - - - name: Try to sync LDAP group with Openshift existing group not created using sync should failed - community.okd.openshift_adm_groups_sync: - config: "{{ config_out_of_scope_tolerate_not_found }}" - check_mode: yes - register: result - ignore_errors: yes - - - name: Validate group sync failed - assert: - that: - - result is failed - - '"openshift.io/ldap.host label did not match sync host" in result.msg' - - - name: Define allow_groups and deny_groups groups - set_fact: - allow_groups: - - "cn=developers,ou=groups,ou=rfc2307,{{ ldap_root }}" - deny_groups: - - "cn=admins,ou=groups,ou=rfc2307,{{ ldap_root }}" - - - name: Synchronize Groups using allow_groups - community.okd.openshift_adm_groups_sync: - config: "{{ configs.simple }}" - allow_groups: "{{ allow_groups }}" - register: result - check_mode: yes - - - name: Validate Group going to be created - assert: - that: - - result is changed - - result.groups | length == 1 - - result.groups.0.metadata.name == "developers" - - - name: Synchronize 
Groups using deny_groups - community.okd.openshift_adm_groups_sync: - config: "{{ configs.simple }}" - deny_groups: "{{ deny_groups }}" - register: result - check_mode: yes - - - name: Validate Group going to be created - assert: - that: - - result is changed - - result.groups | length == 1 - - result.groups.0.metadata.name == "developers" - - - name: Synchronize groups, remove check_mode - community.okd.openshift_adm_groups_sync: - config: "{{ configs.simple }}" - register: result - - - name: Validate result is changed - assert: - that: - - result is changed - - - name: Read Groups - kubernetes.core.k8s_info: - kind: Group - version: "user.openshift.io/v1" - name: admins - register: result - - - name: Validate group was created - assert: - that: - - result.resources | length == 1 - - '"jane.smith@ansible.org" in {{ result.resources.0.users }}' - - - name: Read Groups - kubernetes.core.k8s_info: - kind: Group - version: "user.openshift.io/v1" - name: developers - register: result - - - name: Validate group was created - assert: - that: - - result.resources | length == 1 - - '"jim.adams@ansible.org" in {{ result.resources.0.users }}' - - '"jordanbulls@ansible.org" in {{ result.resources.0.users }}' - - - name: Set users to delete (no admins users anymore and only 1 developer kept) - set_fact: - users_to_delete: - - "cn=Jane,ou=people,ou=rfc2307,{{ ldap_root }}" - - "cn=Jim,ou=people,ou=rfc2307,{{ ldap_root }}" - - - name: Delete users from LDAP servers - openshift_ldap_entry: - bind_dn: "{{ ldap_bind_dn }}" - bind_pw: "{{ ldap_bind_pw }}" - server_uri: "{{ ldap_server_uri }}" - dn: "{{ item }}" - state: absent - with_items: "{{ users_to_delete }}" - - - name: Define sync configuration with tolerateMemberNotFoundErrors - set_fact: - config_simple_tolerate_not_found: "{{ configs.simple | combine({'rfc2307': merge_rfc2307 })}}" - vars: - merge_rfc2307: "{{ configs.simple.rfc2307 | combine({'tolerateMemberNotFoundErrors': 'true'}) }}" - - - name: Synchronize groups once again after users deletion - community.okd.openshift_adm_groups_sync: - config: "{{ config_simple_tolerate_not_found }}" - register: result - - - name: Validate result is changed - assert: - that: - - result is changed - - - name: Read Groups - kubernetes.core.k8s_info: - kind: Group - version: "user.openshift.io/v1" - name: admins - register: result - - - name: Validate admins group does not contains users anymore - assert: - that: - - result.resources | length == 1 - - result.resources.0.users == [] - - - name: Read Groups - kubernetes.core.k8s_info: - kind: Group - version: "user.openshift.io/v1" - name: developers - register: result - - - name: Validate group was created - assert: - that: - - result.resources | length == 1 - - '"jordanbulls@ansible.org" in {{ result.resources.0.users }}' - - - name: Set group to delete - set_fact: - groups_to_delete: - - "cn=developers,ou=groups,ou=rfc2307,{{ ldap_root }}" - - - name: Delete Group from LDAP servers - openshift_ldap_entry: - bind_dn: "{{ ldap_bind_dn }}" - bind_pw: "{{ ldap_bind_pw }}" - server_uri: "{{ ldap_server_uri }}" - dn: "{{ item }}" - state: absent - with_items: "{{ groups_to_delete }}" - - - name: Prune groups - community.okd.openshift_adm_groups_sync: - config: "{{ config_simple_tolerate_not_found }}" - state: absent - register: result - check_mode: yes - - - name: Validate that only developers group is candidate for Prune - assert: - that: - - result is changed - - result.groups | length == 1 - - result.groups.0.metadata.name == "developers" - - - name: Read Group 
(validate that check_mode did not performed update in the cluster) - kubernetes.core.k8s_info: - kind: Group - version: "user.openshift.io/v1" - name: developers - register: result - - - name: Assert group was found - assert: - that: - - result.resources | length == 1 - - - name: Prune using allow_groups - community.okd.openshift_adm_groups_sync: - config: "{{ config_simple_tolerate_not_found }}" - allow_groups: - - developers - state: absent - register: result - check_mode: yes - - - name: assert developers group was candidate for prune - assert: - that: - - result is changed - - result.groups | length == 1 - - result.groups.0.metadata.name == "developers" - - - name: Prune using deny_groups - community.okd.openshift_adm_groups_sync: - config: "{{ config_simple_tolerate_not_found }}" - deny_groups: + version: "user.openshift.io/v1" + name: "{{ item }}" + with_items: + - admins + - engineers - developers - state: absent - register: result - check_mode: yes - - - name: assert nothing found candidate for prune - assert: - that: - - result is not changed - - result.groups | length == 0 - - - name: Prune groups - community.okd.openshift_adm_groups_sync: - config: "{{ config_simple_tolerate_not_found }}" - state: absent - register: result - - - name: Validate result is changed - assert: - that: - - result is changed - - result.groups | length == 1 - - - name: Get developers group info - kubernetes.core.k8s_info: - kind: Group - version: "user.openshift.io/v1" - name: developers - register: result - - - name: assert group was deleted - assert: - that: - - result.resources | length == 0 - - - name: Get admins group info - kubernetes.core.k8s_info: - kind: Group - version: "user.openshift.io/v1" - name: admins - register: result - - - name: assert group was not deleted - assert: - that: - - result.resources | length == 1 - - - name: Prune groups once again (idempotency) - community.okd.openshift_adm_groups_sync: - config: "{{ config_simple_tolerate_not_found }}" - state: absent - register: result - - - name: Assert nothing changed - assert: - that: - - result is not changed - - result.groups | length == 0 + + - name: Delete existing LDAP entries + openshift_ldap_entry: + bind_dn: "{{ ldap_bind_dn }}" + bind_pw: "{{ ldap_bind_pw }}" + server_uri: "{{ ldap_server_uri }}" + dn: "{{ item.dn }}" + state: absent + with_items: "{{ ldap_resources.users + ldap_resources.groups + ldap_resources.units | reverse | list }}" + + - name: Create LDAP units + openshift_ldap_entry: + bind_dn: "{{ ldap_bind_dn }}" + bind_pw: "{{ ldap_bind_pw }}" + server_uri: "{{ ldap_server_uri }}" + dn: "{{ item.dn }}" + attributes: "{{ item.attr }}" + objectClass: "{{ item.class }}" + with_items: "{{ ldap_resources.units }}" + + - name: Create LDAP Groups + openshift_ldap_entry: + bind_dn: "{{ ldap_bind_dn }}" + bind_pw: "{{ ldap_bind_pw }}" + server_uri: "{{ ldap_server_uri }}" + dn: "{{ item.dn }}" + attributes: "{{ item.attr }}" + objectClass: "{{ item.class }}" + with_items: "{{ ldap_resources.groups }}" + + - name: Create LDAP users + openshift_ldap_entry: + bind_dn: "{{ ldap_bind_dn }}" + bind_pw: "{{ ldap_bind_pw }}" + server_uri: "{{ ldap_server_uri }}" + dn: "{{ item.dn }}" + attributes: "{{ item.attr }}" + objectClass: "{{ item.class }}" + with_items: "{{ ldap_resources.users }}" + + - name: Load test configurations + set_fact: + configs: "{{ lookup('template', 'rfc2307/sync-config.j2') | from_yaml }}" + + - name: Synchronize Groups + community.okd.openshift_adm_groups_sync: + config: "{{ configs.simple }}" + 
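configs.simple and its variants come from rfc2307/sync-config.j2, also outside this diff; an rfc2307-schema config consistent with these assertions (admins holding one member, developers two) would look something like:

kind: LDAPSyncConfig
apiVersion: v1
url: "{{ ldap_server_uri }}"
bindDN: "{{ ldap_bind_dn }}"
bindPassword: "{{ ldap_bind_pw }}"
insecure: true
rfc2307:
  groupsQuery:
    baseDN: "ou=groups,ou=rfc2307,{{ ldap_root }}"   # matches the group DNs in these tasks
    scope: sub
    derefAliases: never
  groupUIDAttribute: dn
  groupNameAttributes: [cn]
  groupMembershipAttributes: [member]
  usersQuery:
    baseDN: "ou=people,ou=rfc2307,{{ ldap_root }}"
    scope: sub
    derefAliases: never
  userUIDAttribute: dn
  userNameAttributes: [mail]
  tolerateMemberNotFoundErrors: false   # the tasks later flip this via combine()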
check_mode: yes + register: result + + - name: Validate Group going to be created + assert: + that: + - result is changed + - admins_group + - devs_group + - '"jane.smith@ansible.org" in {{ admins_group.users }}' + - '"jim.adams@ansible.org" in {{ devs_group.users }}' + - '"jordanbulls@ansible.org" in {{ devs_group.users }}' + - admins_group.users | length == 1 + - devs_group.users | length == 2 + vars: + admins_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'admins') | first }}" + devs_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'developers') | first }}" + + - name: Synchronize Groups - User defined mapping + community.okd.openshift_adm_groups_sync: + config: "{{ configs.user_defined }}" + check_mode: yes + register: result + + - name: Validate Group going to be created + assert: + that: + - result is changed + - admins_group + - devs_group + - '"jane.smith@ansible.org" in {{ admins_group.users }}' + - '"jim.adams@ansible.org" in {{ devs_group.users }}' + - '"jordanbulls@ansible.org" in {{ devs_group.users }}' + - admins_group.users | length == 1 + - devs_group.users | length == 2 + vars: + admins_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'ansible-admins') | first }}" + devs_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'ansible-devs') | first }}" + + - name: Synchronize Groups - Using dn for every query + community.okd.openshift_adm_groups_sync: + config: "{{ configs.dn_everywhere }}" + check_mode: yes + register: result + + - name: Validate Group going to be created + assert: + that: + - result is changed + - admins_group + - devs_group + - '"cn=Jane,ou=people,ou=rfc2307,{{ ldap_root }}" in {{ admins_group.users }}' + - '"cn=Jim,ou=people,ou=rfc2307,{{ ldap_root }}" in {{ devs_group.users }}' + - '"cn=Jordan,ou=people,ou=rfc2307,{{ ldap_root }}" in {{ devs_group.users }}' + - admins_group.users | length == 1 + - devs_group.users | length == 2 + vars: + admins_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'cn=admins,ou=groups,ou=rfc2307,' + ldap_root ) | first }}" + devs_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'cn=developers,ou=groups,ou=rfc2307,' + ldap_root ) | first }}" + + - name: Synchronize Groups - Partially user defined mapping + community.okd.openshift_adm_groups_sync: + config: "{{ configs.partially_user_defined }}" + check_mode: yes + register: result + + - name: Validate Group going to be created + assert: + that: + - result is changed + - admins_group + - devs_group + - '"jane.smith@ansible.org" in {{ admins_group.users }}' + - '"jim.adams@ansible.org" in {{ devs_group.users }}' + - '"jordanbulls@ansible.org" in {{ devs_group.users }}' + - admins_group.users | length == 1 + - devs_group.users | length == 2 + vars: + admins_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'ansible-admins') | first }}" + devs_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'developers') | first }}" + + - name: Delete Group 'engineers' if created before + community.okd.k8s: + state: absent + kind: Group + version: "user.openshift.io/v1" + name: 'engineers' + wait: yes + ignore_errors: yes + + - name: Synchronize Groups - Partially user defined mapping + community.okd.openshift_adm_groups_sync: + config: "{{ configs.out_scope }}" + check_mode: yes + register: result + ignore_errors: yes + + - name: Assert group sync failed due to non-existent member + assert: + that: + - result is failed + - result.msg.startswith("Entry not found 
for base='cn=Matthew,ou=people,ou=outrfc2307,{{ ldap_root }}'") + + - name: Define sync configuration with tolerateMemberNotFoundErrors + set_fact: + config_out_of_scope_tolerate_not_found: "{{ configs.out_scope | combine({'rfc2307': merge_rfc2307 })}}" + vars: + merge_rfc2307: "{{ configs.out_scope.rfc2307 | combine({'tolerateMemberNotFoundErrors': 'true'}) }}" + + - name: Synchronize Groups - Partially user defined mapping (tolerateMemberNotFoundErrors=true) + community.okd.openshift_adm_groups_sync: + config: "{{ config_out_of_scope_tolerate_not_found }}" + check_mode: yes + register: result + + - name: Assert group sync did not fail (tolerateMemberNotFoundErrors=true) + assert: + that: + - result is changed + - result.groups | length == 1 + - result.groups.0.metadata.name == 'engineers' + - result.groups.0.users == ['Abraham'] + + - name: Create Group 'engineers' + community.okd.k8s: + state: present + wait: yes + definition: + kind: Group + apiVersion: "user.openshift.io/v1" + metadata: + name: engineers + users: [] + + - name: Try to sync LDAP group with Openshift existing group not created using sync should failed + community.okd.openshift_adm_groups_sync: + config: "{{ config_out_of_scope_tolerate_not_found }}" + check_mode: yes + register: result + ignore_errors: yes + + - name: Validate group sync failed + assert: + that: + - result is failed + - '"openshift.io/ldap.host label did not match sync host" in result.msg' + + - name: Define allow_groups and deny_groups groups + set_fact: + allow_groups: + - "cn=developers,ou=groups,ou=rfc2307,{{ ldap_root }}" + deny_groups: + - "cn=admins,ou=groups,ou=rfc2307,{{ ldap_root }}" + + - name: Synchronize Groups using allow_groups + community.okd.openshift_adm_groups_sync: + config: "{{ configs.simple }}" + allow_groups: "{{ allow_groups }}" + register: result + check_mode: yes + + - name: Validate Group going to be created + assert: + that: + - result is changed + - result.groups | length == 1 + - result.groups.0.metadata.name == "developers" + + - name: Synchronize Groups using deny_groups + community.okd.openshift_adm_groups_sync: + config: "{{ configs.simple }}" + deny_groups: "{{ deny_groups }}" + register: result + check_mode: yes + + - name: Validate Group going to be created + assert: + that: + - result is changed + - result.groups | length == 1 + - result.groups.0.metadata.name == "developers" + + - name: Synchronize groups, remove check_mode + community.okd.openshift_adm_groups_sync: + config: "{{ configs.simple }}" + register: result + + - name: Validate result is changed + assert: + that: + - result is changed + + - name: Read Groups + kubernetes.core.k8s_info: + kind: Group + version: "user.openshift.io/v1" + name: admins + register: result + + - name: Validate group was created + assert: + that: + - result.resources | length == 1 + - '"jane.smith@ansible.org" in {{ result.resources.0.users }}' + + - name: Read Groups + kubernetes.core.k8s_info: + kind: Group + version: "user.openshift.io/v1" + name: developers + register: result + + - name: Validate group was created + assert: + that: + - result.resources | length == 1 + - '"jim.adams@ansible.org" in {{ result.resources.0.users }}' + - '"jordanbulls@ansible.org" in {{ result.resources.0.users }}' + + - name: Set users to delete (no admins users anymore and only 1 developer kept) + set_fact: + users_to_delete: + - "cn=Jane,ou=people,ou=rfc2307,{{ ldap_root }}" + - "cn=Jim,ou=people,ou=rfc2307,{{ ldap_root }}" + + - name: Delete users from LDAP servers + openshift_ldap_entry: + 
bind_dn: "{{ ldap_bind_dn }}" + bind_pw: "{{ ldap_bind_pw }}" + server_uri: "{{ ldap_server_uri }}" + dn: "{{ item }}" + state: absent + with_items: "{{ users_to_delete }}" + + - name: Define sync configuration with tolerateMemberNotFoundErrors + set_fact: + config_simple_tolerate_not_found: "{{ configs.simple | combine({'rfc2307': merge_rfc2307 })}}" + vars: + merge_rfc2307: "{{ configs.simple.rfc2307 | combine({'tolerateMemberNotFoundErrors': 'true'}) }}" + + - name: Synchronize groups once again after users deletion + community.okd.openshift_adm_groups_sync: + config: "{{ config_simple_tolerate_not_found }}" + register: result + + - name: Validate result is changed + assert: + that: + - result is changed + + - name: Read Groups + kubernetes.core.k8s_info: + kind: Group + version: "user.openshift.io/v1" + name: admins + register: result + + - name: Validate admins group does not contains users anymore + assert: + that: + - result.resources | length == 1 + - result.resources.0.users == [] + + - name: Read Groups + kubernetes.core.k8s_info: + kind: Group + version: "user.openshift.io/v1" + name: developers + register: result + + - name: Validate group was created + assert: + that: + - result.resources | length == 1 + - '"jordanbulls@ansible.org" in {{ result.resources.0.users }}' + + - name: Set group to delete + set_fact: + groups_to_delete: + - "cn=developers,ou=groups,ou=rfc2307,{{ ldap_root }}" + + - name: Delete Group from LDAP servers + openshift_ldap_entry: + bind_dn: "{{ ldap_bind_dn }}" + bind_pw: "{{ ldap_bind_pw }}" + server_uri: "{{ ldap_server_uri }}" + dn: "{{ item }}" + state: absent + with_items: "{{ groups_to_delete }}" + + - name: Prune groups + community.okd.openshift_adm_groups_sync: + config: "{{ config_simple_tolerate_not_found }}" + state: absent + register: result + check_mode: yes + + - name: Validate that only developers group is candidate for Prune + assert: + that: + - result is changed + - result.groups | length == 1 + - result.groups.0.metadata.name == "developers" + + - name: Read Group (validate that check_mode did not performed update in the cluster) + kubernetes.core.k8s_info: + kind: Group + version: "user.openshift.io/v1" + name: developers + register: result + + - name: Assert group was found + assert: + that: + - result.resources | length == 1 + + - name: Prune using allow_groups + community.okd.openshift_adm_groups_sync: + config: "{{ config_simple_tolerate_not_found }}" + allow_groups: + - developers + state: absent + register: result + check_mode: yes + + - name: assert developers group was candidate for prune + assert: + that: + - result is changed + - result.groups | length == 1 + - result.groups.0.metadata.name == "developers" + + - name: Prune using deny_groups + community.okd.openshift_adm_groups_sync: + config: "{{ config_simple_tolerate_not_found }}" + deny_groups: + - developers + state: absent + register: result + check_mode: yes + + - name: assert nothing found candidate for prune + assert: + that: + - result is not changed + - result.groups | length == 0 + + - name: Prune groups + community.okd.openshift_adm_groups_sync: + config: "{{ config_simple_tolerate_not_found }}" + state: absent + register: result + + - name: Validate result is changed + assert: + that: + - result is changed + - result.groups | length == 1 + + - name: Get developers group info + kubernetes.core.k8s_info: + kind: Group + version: "user.openshift.io/v1" + name: developers + register: result + + - name: assert group was deleted + assert: + that: + - result.resources 
| length == 0 + + - name: Get admins group info + kubernetes.core.k8s_info: + kind: Group + version: "user.openshift.io/v1" + name: admins + register: result + + - name: assert group was not deleted + assert: + that: + - result.resources | length == 1 + + - name: Prune groups once again (idempotency) + community.okd.openshift_adm_groups_sync: + config: "{{ config_simple_tolerate_not_found }}" + state: absent + register: result + + - name: Assert nothing changed + assert: + that: + - result is not changed + - result.groups | length == 0 always: - name: Delete openshift groups if existing diff --git a/ansible_collections/community/okd/molecule/default/tasks/openshift_adm_prune_auth_clusterroles.yml b/ansible_collections/community/okd/molecule/default/tasks/openshift_adm_prune_auth_clusterroles.yml index 4de4894e2..edf0e17c4 100644 --- a/ansible_collections/community/okd/molecule/default/tasks/openshift_adm_prune_auth_clusterroles.yml +++ b/ansible_collections/community/okd/molecule/default/tasks/openshift_adm_prune_auth_clusterroles.yml @@ -1,293 +1,294 @@ +--- - block: - - set_fact: - test_sa: "clusterrole-sa" - test_ns: "clusterrole-ns" - - - name: Ensure namespace - kubernetes.core.k8s: - kind: Namespace - name: "{{ test_ns }}" - - - name: Get cluster information - kubernetes.core.k8s_cluster_info: - register: cluster_info - no_log: true - - - set_fact: - cluster_host: "{{ cluster_info['connection']['host'] }}" - - - name: Create Service account - kubernetes.core.k8s: - definition: - apiVersion: v1 + - set_fact: + test_sa: "clusterrole-sa" + test_ns: "clusterrole-ns" + + - name: Ensure namespace + kubernetes.core.k8s: + kind: Namespace + name: "{{ test_ns }}" + + - name: Get cluster information + kubernetes.core.k8s_cluster_info: + register: cluster_info + no_log: true + + - set_fact: + cluster_host: "{{ cluster_info['connection']['host'] }}" + + - name: Create Service account + kubernetes.core.k8s: + definition: + apiVersion: v1 + kind: ServiceAccount + metadata: + name: "{{ test_sa }}" + namespace: "{{ test_ns }}" + + - name: Read Service Account + kubernetes.core.k8s_info: kind: ServiceAccount - metadata: - name: "{{ test_sa }}" - namespace: "{{ test_ns }}" - - - name: Read Service Account - kubernetes.core.k8s_info: - kind: ServiceAccount - namespace: "{{ test_ns }}" - name: "{{ test_sa }}" - register: result - - - set_fact: - secret_token: "{{ result.resources[0]['secrets'][0]['name'] }}" - - - name: Get secret details - kubernetes.core.k8s_info: - kind: Secret - namespace: '{{ test_ns }}' - name: '{{ secret_token }}' - register: _secret - retries: 10 - delay: 10 - until: - - ("'openshift.io/token-secret.value' in _secret.resources[0]['metadata']['annotations']") or ("'token' in _secret.resources[0]['data']") - - - set_fact: - api_token: "{{ _secret.resources[0]['metadata']['annotations']['openshift.io/token-secret.value'] }}" - when: "'openshift.io/token-secret.value' in _secret.resources[0]['metadata']['annotations']" - - - set_fact: - api_token: "{{ _secret.resources[0]['data']['token'] | b64decode }}" - when: "'token' in _secret.resources[0]['data']" - - - name: list Node should failed (forbidden user) - kubernetes.core.k8s_info: - api_key: "{{ api_token }}" - host: "{{ cluster_host }}" - validate_certs: no - kind: Node - register: error - ignore_errors: true - - - assert: - that: - - '"nodes is forbidden: User" in error.msg' - - - name: list Pod for all namespace should failed - kubernetes.core.k8s_info: - api_key: "{{ api_token }}" - host: "{{ cluster_host }}" - validate_certs: 
no - kind: Pod - register: error - ignore_errors: true - - - assert: - that: - - '"pods is forbidden: User" in error.msg' - - - name: list Pod for test namespace should failed - kubernetes.core.k8s_info: - api_key: "{{ api_token }}" - host: "{{ cluster_host }}" - validate_certs: no - kind: Pod - namespace: "{{ test_ns }}" - register: error - ignore_errors: true - - - assert: - that: - - '"pods is forbidden: User" in error.msg' - - - set_fact: - test_labels: - phase: dev - cluster_roles: - - name: pod-manager - resources: - - pods - verbs: - - list - api_version_binding: "authorization.openshift.io/v1" - - name: node-manager - resources: - - nodes - verbs: - - list - api_version_binding: "rbac.authorization.k8s.io/v1" - - - name: Create cluster roles - kubernetes.core.k8s: - definition: - kind: ClusterRole - apiVersion: "rbac.authorization.k8s.io/v1" - metadata: - name: "{{ item.name }}" - labels: "{{ test_labels }}" - rules: - - apiGroups: [""] - resources: "{{ item.resources }}" - verbs: "{{ item.verbs }}" - with_items: '{{ cluster_roles }}' - - - name: Create Role Binding (namespaced) - kubernetes.core.k8s: - definition: - kind: RoleBinding - apiVersion: "rbac.authorization.k8s.io/v1" - metadata: - name: "{{ cluster_roles[0].name }}-binding" - namespace: "{{ test_ns }}" - labels: "{{ test_labels }}" - subjects: - - kind: ServiceAccount - name: "{{ test_sa }}" - namespace: "{{ test_ns }}" - apiGroup: "" - roleRef: - kind: ClusterRole - name: "{{ cluster_roles[0].name }}" - apiGroup: "" - - - name: list Pod for all namespace should failed - kubernetes.core.k8s_info: - api_key: "{{ api_token }}" - host: "{{ cluster_host }}" - validate_certs: no - kind: Pod - register: error - ignore_errors: true - - - assert: - that: - - '"pods is forbidden: User" in error.msg' - - - name: list Pod for test namespace should succeed - kubernetes.core.k8s_info: - api_key: "{{ api_token }}" - host: "{{ cluster_host }}" - validate_certs: no - kind: Pod - namespace: "{{ test_ns }}" - no_log: true - - - name: Create Cluster role Binding - kubernetes.core.k8s: - definition: - kind: ClusterRoleBinding - apiVersion: "{{ item.api_version_binding }}" - metadata: - name: "{{ item.name }}-binding" - labels: "{{ test_labels }}" - subjects: - - kind: ServiceAccount - name: "{{ test_sa }}" - namespace: "{{ test_ns }}" - apiGroup: "" - roleRef: + namespace: "{{ test_ns }}" + name: "{{ test_sa }}" + register: result + + - set_fact: + secret_token: "{{ result.resources[0]['secrets'][0]['name'] }}" + + - name: Get secret details + kubernetes.core.k8s_info: + kind: Secret + namespace: '{{ test_ns }}' + name: '{{ secret_token }}' + register: _secret + retries: 10 + delay: 10 + until: + - ("'openshift.io/token-secret.value' in _secret.resources[0]['metadata']['annotations']") or ("'token' in _secret.resources[0]['data']") + + - set_fact: + api_token: "{{ _secret.resources[0]['metadata']['annotations']['openshift.io/token-secret.value'] }}" + when: "'openshift.io/token-secret.value' in _secret.resources[0]['metadata']['annotations']" + + - set_fact: + api_token: "{{ _secret.resources[0]['data']['token'] | b64decode }}" + when: "'token' in _secret.resources[0]['data']" + + - name: list Node should failed (forbidden user) + kubernetes.core.k8s_info: + api_key: "{{ api_token }}" + host: "{{ cluster_host }}" + validate_certs: no + kind: Node + register: error + ignore_errors: true + + - assert: + that: + - '"nodes is forbidden: User" in error.msg' + + - name: list Pod for all namespace should failed + kubernetes.core.k8s_info: + 
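The secret_token lookup above relies on the ServiceAccount having an auto-generated token Secret, which Kubernetes 1.24+ clusters no longer create; on such clusters a token Secret would have to be provisioned explicitly first, along these lines:

- name: Create a long-lived token Secret for the test ServiceAccount (needed on K8s >= 1.24)
  kubernetes.core.k8s:
    definition:
      apiVersion: v1
      kind: Secret
      type: kubernetes.io/service-account-token
      metadata:
        name: "{{ test_sa }}-token"
        namespace: "{{ test_ns }}"
        annotations:
          kubernetes.io/service-account.name: "{{ test_sa }}"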
api_key: "{{ api_token }}" + host: "{{ cluster_host }}" + validate_certs: no + kind: Pod + register: error + ignore_errors: true + + - assert: + that: + - '"pods is forbidden: User" in error.msg' + + - name: list Pod for test namespace should failed + kubernetes.core.k8s_info: + api_key: "{{ api_token }}" + host: "{{ cluster_host }}" + validate_certs: no + kind: Pod + namespace: "{{ test_ns }}" + register: error + ignore_errors: true + + - assert: + that: + - '"pods is forbidden: User" in error.msg' + + - set_fact: + test_labels: + phase: dev + cluster_roles: + - name: pod-manager + resources: + - pods + verbs: + - list + api_version_binding: "authorization.openshift.io/v1" + - name: node-manager + resources: + - nodes + verbs: + - list + api_version_binding: "rbac.authorization.k8s.io/v1" + + - name: Create cluster roles + kubernetes.core.k8s: + definition: kind: ClusterRole - name: "{{ item.name }}" - apiGroup: "" - with_items: "{{ cluster_roles }}" - - - name: list Pod for all namespace should succeed - kubernetes.core.k8s_info: - api_key: "{{ api_token }}" - host: "{{ cluster_host }}" - validate_certs: no - kind: Pod - no_log: true - - - name: list Pod for test namespace should succeed - kubernetes.core.k8s_info: - api_key: "{{ api_token }}" - host: "{{ cluster_host }}" - validate_certs: no - kind: Pod - namespace: "{{ test_ns }}" - no_log: true - - - name: list Node using ServiceAccount - kubernetes.core.k8s_info: - api_key: "{{ api_token }}" - host: "{{ cluster_host }}" - validate_certs: no - kind: Node - namespace: "{{ test_ns }}" - no_log: true - - - name: Prune clusterroles (check mode) - community.okd.openshift_adm_prune_auth: - resource: clusterroles - label_selectors: - - phase=dev - register: check - check_mode: true - - - name: validate clusterrole binding candidates for prune - assert: - that: - - '"{{ item.name }}-binding" in check.cluster_role_binding' - - '"{{ test_ns }}/{{ cluster_roles[0].name }}-binding" in check.role_binding' - with_items: "{{ cluster_roles }}" - - - name: Prune Cluster Role for managing Pod - community.okd.openshift_adm_prune_auth: - resource: clusterroles - name: "{{ cluster_roles[0].name }}" - - - name: list Pod for all namespace should failed - kubernetes.core.k8s_info: - api_key: "{{ api_token }}" - host: "{{ cluster_host }}" - validate_certs: no - kind: Pod - register: error - no_log: true - ignore_errors: true - - - assert: - that: - - '"pods is forbidden: User" in error.msg' - - - name: list Pod for test namespace should failed - kubernetes.core.k8s_info: - api_key: "{{ api_token }}" - host: "{{ cluster_host }}" - validate_certs: no - kind: Pod - namespace: "{{ test_ns }}" - register: error - no_log: true - ignore_errors: true - - - assert: - that: - - '"pods is forbidden: User" in error.msg' - - - name: list Node using ServiceAccount - kubernetes.core.k8s_info: - api_key: "{{ api_token }}" - host: "{{ cluster_host }}" - validate_certs: no - kind: Node - namespace: "{{ test_ns }}" - no_log: true - - - name: Prune clusterroles (remaining) - community.okd.openshift_adm_prune_auth: - resource: clusterroles - label_selectors: - - phase=dev - - - name: list Node using ServiceAccount should fail - kubernetes.core.k8s_info: - api_key: "{{ api_token }}" - host: "{{ cluster_host }}" - validate_certs: no - kind: Node - namespace: "{{ test_ns }}" - register: error - ignore_errors: true - - - assert: - that: - - '"nodes is forbidden: User" in error.msg' + apiVersion: "rbac.authorization.k8s.io/v1" + metadata: + name: "{{ item.name }}" + labels: "{{ 
test_labels }}" + rules: + - apiGroups: [""] + resources: "{{ item.resources }}" + verbs: "{{ item.verbs }}" + with_items: '{{ cluster_roles }}' + + - name: Create Role Binding (namespaced) + kubernetes.core.k8s: + definition: + kind: RoleBinding + apiVersion: "rbac.authorization.k8s.io/v1" + metadata: + name: "{{ cluster_roles[0].name }}-binding" + namespace: "{{ test_ns }}" + labels: "{{ test_labels }}" + subjects: + - kind: ServiceAccount + name: "{{ test_sa }}" + namespace: "{{ test_ns }}" + apiGroup: "" + roleRef: + kind: ClusterRole + name: "{{ cluster_roles[0].name }}" + apiGroup: "" + + - name: list Pod for all namespace should failed + kubernetes.core.k8s_info: + api_key: "{{ api_token }}" + host: "{{ cluster_host }}" + validate_certs: no + kind: Pod + register: error + ignore_errors: true + + - assert: + that: + - '"pods is forbidden: User" in error.msg' + + - name: list Pod for test namespace should succeed + kubernetes.core.k8s_info: + api_key: "{{ api_token }}" + host: "{{ cluster_host }}" + validate_certs: no + kind: Pod + namespace: "{{ test_ns }}" + no_log: true + + - name: Create Cluster role Binding + kubernetes.core.k8s: + definition: + kind: ClusterRoleBinding + apiVersion: "{{ item.api_version_binding }}" + metadata: + name: "{{ item.name }}-binding" + labels: "{{ test_labels }}" + subjects: + - kind: ServiceAccount + name: "{{ test_sa }}" + namespace: "{{ test_ns }}" + apiGroup: "" + roleRef: + kind: ClusterRole + name: "{{ item.name }}" + apiGroup: "" + with_items: "{{ cluster_roles }}" + + - name: list Pod for all namespace should succeed + kubernetes.core.k8s_info: + api_key: "{{ api_token }}" + host: "{{ cluster_host }}" + validate_certs: no + kind: Pod + no_log: true + + - name: list Pod for test namespace should succeed + kubernetes.core.k8s_info: + api_key: "{{ api_token }}" + host: "{{ cluster_host }}" + validate_certs: no + kind: Pod + namespace: "{{ test_ns }}" + no_log: true + + - name: list Node using ServiceAccount + kubernetes.core.k8s_info: + api_key: "{{ api_token }}" + host: "{{ cluster_host }}" + validate_certs: no + kind: Node + namespace: "{{ test_ns }}" + no_log: true + + - name: Prune clusterroles (check mode) + community.okd.openshift_adm_prune_auth: + resource: clusterroles + label_selectors: + - phase=dev + register: check + check_mode: true + + - name: validate clusterrole binding candidates for prune + assert: + that: + - '"{{ item.name }}-binding" in check.cluster_role_binding' + - '"{{ test_ns }}/{{ cluster_roles[0].name }}-binding" in check.role_binding' + with_items: "{{ cluster_roles }}" + + - name: Prune Cluster Role for managing Pod + community.okd.openshift_adm_prune_auth: + resource: clusterroles + name: "{{ cluster_roles[0].name }}" + + - name: list Pod for all namespace should failed + kubernetes.core.k8s_info: + api_key: "{{ api_token }}" + host: "{{ cluster_host }}" + validate_certs: no + kind: Pod + register: error + no_log: true + ignore_errors: true + + - assert: + that: + - '"pods is forbidden: User" in error.msg' + + - name: list Pod for test namespace should failed + kubernetes.core.k8s_info: + api_key: "{{ api_token }}" + host: "{{ cluster_host }}" + validate_certs: no + kind: Pod + namespace: "{{ test_ns }}" + register: error + no_log: true + ignore_errors: true + + - assert: + that: + - '"pods is forbidden: User" in error.msg' + + - name: list Node using ServiceAccount + kubernetes.core.k8s_info: + api_key: "{{ api_token }}" + host: "{{ cluster_host }}" + validate_certs: no + kind: Node + namespace: "{{ test_ns }}" + 
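Because pruning authorization objects is destructive, the pattern used throughout is to dry-run first: under check_mode, openshift_adm_prune_auth only reports the bindings it would delete in its cluster_role_binding and role_binding return values. A minimal sketch of that preview step (the label and the expected count are illustrative):

    - name: Preview which bindings would be pruned
      community.okd.openshift_adm_prune_auth:
        resource: clusterroles
        label_selectors:
          - phase=dev
      check_mode: true
      register: preview

    - name: Fail early if more bindings than expected are candidates
      ansible.builtin.assert:
        that:
          - preview.cluster_role_binding | length <= 2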
no_log: true + + - name: Prune clusterroles (remaining) + community.okd.openshift_adm_prune_auth: + resource: clusterroles + label_selectors: + - phase=dev + + - name: list Node using ServiceAccount should fail + kubernetes.core.k8s_info: + api_key: "{{ api_token }}" + host: "{{ cluster_host }}" + validate_certs: no + kind: Node + namespace: "{{ test_ns }}" + register: error + ignore_errors: true + + - assert: + that: + - '"nodes is forbidden: User" in error.msg' always: - name: Ensure namespace is deleted diff --git a/ansible_collections/community/okd/molecule/default/tasks/openshift_adm_prune_auth_roles.yml b/ansible_collections/community/okd/molecule/default/tasks/openshift_adm_prune_auth_roles.yml index 1724a1938..7b75a0cfe 100644 --- a/ansible_collections/community/okd/molecule/default/tasks/openshift_adm_prune_auth_roles.yml +++ b/ansible_collections/community/okd/molecule/default/tasks/openshift_adm_prune_auth_roles.yml @@ -1,335 +1,336 @@ +--- - block: - - set_fact: - test_ns: "prune-roles" - sa_name: "roles-sa" - pod_name: "pod-prune" - role_definition: - - name: pod-list - labels: - action: list - verbs: - - list - role_binding: - api_version: rbac.authorization.k8s.io/v1 - - name: pod-create - labels: - action: create - verbs: - - create - - get - role_binding: - api_version: authorization.openshift.io/v1 - - name: pod-delete - labels: - action: delete - verbs: - - delete - role_binding: - api_version: rbac.authorization.k8s.io/v1 - - - name: Ensure namespace - kubernetes.core.k8s: - kind: Namespace - name: '{{ test_ns }}' - - - name: Get cluster information - kubernetes.core.k8s_cluster_info: - register: cluster_info - no_log: true - - - set_fact: - cluster_host: "{{ cluster_info['connection']['host'] }}" - - - name: Create Service account - kubernetes.core.k8s: - definition: - apiVersion: v1 + - set_fact: + test_ns: "prune-roles" + sa_name: "roles-sa" + pod_name: "pod-prune" + role_definition: + - name: pod-list + labels: + action: list + verbs: + - list + role_binding: + api_version: rbac.authorization.k8s.io/v1 + - name: pod-create + labels: + action: create + verbs: + - create + - get + role_binding: + api_version: authorization.openshift.io/v1 + - name: pod-delete + labels: + action: delete + verbs: + - delete + role_binding: + api_version: rbac.authorization.k8s.io/v1 + + - name: Ensure namespace + kubernetes.core.k8s: + kind: Namespace + name: '{{ test_ns }}' + + - name: Get cluster information + kubernetes.core.k8s_cluster_info: + register: cluster_info + no_log: true + + - set_fact: + cluster_host: "{{ cluster_info['connection']['host'] }}" + + - name: Create Service account + kubernetes.core.k8s: + definition: + apiVersion: v1 + kind: ServiceAccount + metadata: + name: '{{ sa_name }}' + namespace: '{{ test_ns }}' + + - name: Read Service Account + kubernetes.core.k8s_info: kind: ServiceAccount - metadata: - name: '{{ sa_name }}' - namespace: '{{ test_ns }}' - - - name: Read Service Account - kubernetes.core.k8s_info: - kind: ServiceAccount - namespace: '{{ test_ns }}' - name: '{{ sa_name }}' - register: sa_out - - - set_fact: - secret_token: "{{ sa_out.resources[0]['secrets'][0]['name'] }}" - - - name: Get secret details - kubernetes.core.k8s_info: - kind: Secret - namespace: '{{ test_ns }}' - name: '{{ secret_token }}' - register: r_secret - retries: 10 - delay: 10 - until: - - ("'openshift.io/token-secret.value' in r_secret.resources[0]['metadata']['annotations']") or ("'token' in r_secret.resources[0]['data']") - - - set_fact: - api_token: "{{ 
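The role_binding.api_version values deliberately alternate between rbac.authorization.k8s.io/v1 and the legacy authorization.openshift.io/v1 group: OpenShift mirrors bindings between the two, and the prune module has to find candidates created through either API. A sketch of the same kind of binding expressed against the legacy group (assuming the cluster still serves that API; names are the test's own):

    - name: Create a binding through the legacy OpenShift authorization group
      kubernetes.core.k8s:
        definition:
          kind: RoleBinding
          apiVersion: authorization.openshift.io/v1
          metadata:
            name: pod-list-bind
            namespace: prune-roles
          subjects:
            - kind: ServiceAccount
              name: roles-sa
              namespace: prune-roles
          roleRef:
            kind: Role
            name: pod-list
            namespace: prune-roles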
r_secret.resources[0]['metadata']['annotations']['openshift.io/token-secret.value'] }}" - when: "'openshift.io/token-secret.value' in r_secret.resources[0]['metadata']['annotations']" - - - set_fact: - api_token: "{{ r_secret.resources[0]['data']['token'] | b64decode }}" - when: "'token' in r_secret.resources[0]['data']" - - - name: list resources using service account - kubernetes.core.k8s_info: - api_key: '{{ api_token }}' - host: '{{ cluster_host }}' - validate_certs: no - kind: Pod - namespace: '{{ test_ns }}' - register: error - ignore_errors: true - - - assert: - that: - - '"pods is forbidden: User" in error.msg' - - - name: Create a role to manage Pod from namespace "{{ test_ns }}" - kubernetes.core.k8s: - definition: - kind: Role - apiVersion: rbac.authorization.k8s.io/v1 - metadata: - namespace: "{{ test_ns }}" - name: "{{ item.name }}" - labels: "{{ item.labels }}" - rules: - - apiGroups: [""] - resources: ["pods"] - verbs: "{{ item.verbs }}" - with_items: "{{ role_definition }}" - - - name: Create Role Binding - kubernetes.core.k8s: - definition: - kind: RoleBinding - apiVersion: "{{ item.role_binding.api_version }}" - metadata: - name: "{{ item.name }}-bind" - namespace: "{{ test_ns }}" - subjects: - - kind: ServiceAccount - name: "{{ sa_name }}" - namespace: "{{ test_ns }}" - apiGroup: "" - roleRef: + namespace: '{{ test_ns }}' + name: '{{ sa_name }}' + register: sa_out + + - set_fact: + secret_token: "{{ sa_out.resources[0]['secrets'][0]['name'] }}" + + - name: Get secret details + kubernetes.core.k8s_info: + kind: Secret + namespace: '{{ test_ns }}' + name: '{{ secret_token }}' + register: r_secret + retries: 10 + delay: 10 + until: + - ("'openshift.io/token-secret.value' in r_secret.resources[0]['metadata']['annotations']") or ("'token' in r_secret.resources[0]['data']") + + - set_fact: + api_token: "{{ r_secret.resources[0]['metadata']['annotations']['openshift.io/token-secret.value'] }}" + when: "'openshift.io/token-secret.value' in r_secret.resources[0]['metadata']['annotations']" + + - set_fact: + api_token: "{{ r_secret.resources[0]['data']['token'] | b64decode }}" + when: "'token' in r_secret.resources[0]['data']" + + - name: list resources using service account + kubernetes.core.k8s_info: + api_key: '{{ api_token }}' + host: '{{ cluster_host }}' + validate_certs: no + kind: Pod + namespace: '{{ test_ns }}' + register: error + ignore_errors: true + + - assert: + that: + - '"pods is forbidden: User" in error.msg' + + - name: Create a role to manage Pod from namespace "{{ test_ns }}" + kubernetes.core.k8s: + definition: kind: Role - name: "{{ item.name }}" - namespace: "{{ test_ns }}" - apiGroup: "" - with_items: "{{ role_definition }}" - - - name: Create Pod should succeed - kubernetes.core.k8s: - api_key: "{{ api_token }}" - host: "{{ cluster_host }}" - validate_certs: no - namespace: "{{ test_ns }}" - definition: + apiVersion: rbac.authorization.k8s.io/v1 + metadata: + namespace: "{{ test_ns }}" + name: "{{ item.name }}" + labels: "{{ item.labels }}" + rules: + - apiGroups: [""] + resources: ["pods"] + verbs: "{{ item.verbs }}" + with_items: "{{ role_definition }}" + + - name: Create Role Binding + kubernetes.core.k8s: + definition: + kind: RoleBinding + apiVersion: "{{ item.role_binding.api_version }}" + metadata: + name: "{{ item.name }}-bind" + namespace: "{{ test_ns }}" + subjects: + - kind: ServiceAccount + name: "{{ sa_name }}" + namespace: "{{ test_ns }}" + apiGroup: "" + roleRef: + kind: Role + name: "{{ item.name }}" + namespace: "{{ test_ns }}" + apiGroup: 
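Every negative check in these plays follows one shape: make the call as the ServiceAccount, let it fail under ignore_errors, then assert on the API server's forbidden message taken from the same registered variable. A self-contained sketch of the pattern (variable names are illustrative):

    - name: Attempt a call the ServiceAccount must not be allowed to make
      kubernetes.core.k8s_info:
        api_key: "{{ api_token }}"
        host: "{{ cluster_host }}"
        validate_certs: false
        kind: Pod
        namespace: "{{ test_ns }}"
      register: denied
      ignore_errors: true

    - name: Assert the request was rejected rather than merely empty
      ansible.builtin.assert:
        that:
          - denied is failed
          - '"forbidden: User" in denied.msg'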
"" + with_items: "{{ role_definition }}" + + - name: Create Pod should succeed + kubernetes.core.k8s: + api_key: "{{ api_token }}" + host: "{{ cluster_host }}" + validate_certs: no + namespace: "{{ test_ns }}" + definition: + kind: Pod + metadata: + name: "{{ pod_name }}" + spec: + containers: + - name: python + image: python:3.7-alpine + command: + - /bin/sh + - -c + - while true; do echo $(date); sleep 15; done + imagePullPolicy: IfNotPresent + register: result + + - name: assert pod creation succeed + assert: + that: + - result is successful + + - name: List Pod + kubernetes.core.k8s_info: + api_key: "{{ api_token }}" + host: "{{ cluster_host }}" + validate_certs: no + namespace: "{{ test_ns }}" kind: Pod - metadata: - name: "{{ pod_name }}" - spec: - containers: - - name: python - image: python:3.7-alpine - command: - - /bin/sh - - -c - - while true; do echo $(date); sleep 15; done - imagePullPolicy: IfNotPresent - register: result - - - name: assert pod creation succeed - assert: - that: - - result is successful - - - name: List Pod - kubernetes.core.k8s_info: - api_key: "{{ api_token }}" - host: "{{ cluster_host }}" - validate_certs: no - namespace: "{{ test_ns }}" - kind: Pod - register: result - - - name: assert user is still authorize to list pods - assert: - that: - - result is successful - - - name: Prune auth roles (check mode) - community.okd.openshift_adm_prune_auth: - resource: roles - namespace: "{{ test_ns }}" - register: check - check_mode: true - - - name: validate that list role binding are candidates for prune - assert: - that: '"{{ test_ns }}/{{ item.name }}-bind" in check.role_binding' - with_items: "{{ role_definition }}" - - - name: Prune resource using label_selectors option - community.okd.openshift_adm_prune_auth: - resource: roles - namespace: "{{ test_ns }}" - label_selectors: - - action=delete - register: prune - - - name: assert that role binding 'delete' was pruned - assert: - that: - - prune is changed - - '"{{ test_ns }}/{{ role_definition[2].name }}-bind" in check.role_binding' - - - name: assert that user could not delete pod anymore - kubernetes.core.k8s: - api_key: "{{ api_token }}" - host: "{{ cluster_host }}" - validate_certs: no - state: absent - namespace: "{{ test_ns }}" - kind: Pod - name: "{{ pod_name }}" - register: result - ignore_errors: true - - - name: assert pod deletion failed due to forbidden user - assert: - that: - - '"forbidden: User" in error.msg' - - - name: List Pod - kubernetes.core.k8s_info: - api_key: "{{ api_token }}" - host: "{{ cluster_host }}" - validate_certs: no - namespace: "{{ test_ns }}" - kind: Pod - register: result - - - name: assert user is still able to list pods - assert: - that: - - result is successful - - - name: Create Pod should succeed - kubernetes.core.k8s: - api_key: "{{ api_token }}" - host: "{{ cluster_host }}" - validate_certs: no - namespace: "{{ test_ns }}" - definition: + register: result + + - name: assert user is still authorize to list pods + assert: + that: + - result is successful + + - name: Prune auth roles (check mode) + community.okd.openshift_adm_prune_auth: + resource: roles + namespace: "{{ test_ns }}" + register: check + check_mode: true + + - name: validate that list role binding are candidates for prune + assert: + that: '"{{ test_ns }}/{{ item.name }}-bind" in check.role_binding' + with_items: "{{ role_definition }}" + + - name: Prune resource using label_selectors option + community.okd.openshift_adm_prune_auth: + resource: roles + namespace: "{{ test_ns }}" + label_selectors: + - 
action=delete + register: prune + + - name: assert that role binding 'delete' was pruned + assert: + that: + - prune is changed + - '"{{ test_ns }}/{{ role_definition[2].name }}-bind" in check.role_binding' + + - name: assert that user could not delete pod anymore + kubernetes.core.k8s: + api_key: "{{ api_token }}" + host: "{{ cluster_host }}" + validate_certs: no + state: absent + namespace: "{{ test_ns }}" kind: Pod - metadata: - name: "{{ pod_name }}-1" - spec: - containers: - - name: python - image: python:3.7-alpine - command: - - /bin/sh - - -c - - while true; do echo $(date); sleep 15; done - imagePullPolicy: IfNotPresent - register: result - - - name: assert user is still authorize to create pod - assert: - that: - - result is successful - - - name: Prune role using name - community.okd.openshift_adm_prune_auth: - resource: roles - namespace: "{{ test_ns }}" - name: "{{ role_definition[1].name }}" - register: prune - - - name: assert that role binding 'create' was pruned - assert: - that: - - prune is changed - - '"{{ test_ns }}/{{ role_definition[1].name }}-bind" in check.role_binding' - - - name: Create Pod (should failed) - kubernetes.core.k8s: - api_key: "{{ api_token }}" - host: "{{ cluster_host }}" - validate_certs: no - namespace: "{{ test_ns }}" - definition: + name: "{{ pod_name }}" + register: result + ignore_errors: true + + - name: assert pod deletion failed due to forbidden user + assert: + that: + - '"forbidden: User" in error.msg' + + - name: List Pod + kubernetes.core.k8s_info: + api_key: "{{ api_token }}" + host: "{{ cluster_host }}" + validate_certs: no + namespace: "{{ test_ns }}" kind: Pod - metadata: - name: "{{ pod_name }}-2" - spec: - containers: - - name: python - image: python:3.7-alpine - command: - - /bin/sh - - -c - - while true; do echo $(date); sleep 15; done - imagePullPolicy: IfNotPresent - register: result - ignore_errors: true - - - name: assert user is not authorize to create pod anymore - assert: - that: - - '"forbidden: User" in error.msg' - - - name: List Pod - kubernetes.core.k8s_info: - api_key: "{{ api_token }}" - host: "{{ cluster_host }}" - validate_certs: no - namespace: "{{ test_ns }}" - kind: Pod - register: result - - - name: assert user is still able to list pods - assert: - that: - - result is successful - - - name: Prune all role for namespace (neither name nor label_selectors are specified) - community.okd.openshift_adm_prune_auth: - resource: roles - namespace: "{{ test_ns }}" - register: prune - - - name: assert that role binding 'list' was pruned - assert: - that: - - prune is changed - - '"{{ test_ns }}/{{ role_definition[0].name }}-bind" in check.role_binding' - - - name: List Pod - kubernetes.core.k8s_info: - api_key: "{{ api_token }}" - host: "{{ cluster_host }}" - validate_certs: no - namespace: "{{ test_ns }}" - kind: Pod - register: result - ignore_errors: true - - - name: assert user is not authorize to list pod anymore - assert: - that: - - '"forbidden: User" in error.msg' + register: result + + - name: assert user is still able to list pods + assert: + that: + - result is successful + + - name: Create Pod should succeed + kubernetes.core.k8s: + api_key: "{{ api_token }}" + host: "{{ cluster_host }}" + validate_certs: no + namespace: "{{ test_ns }}" + definition: + kind: Pod + metadata: + name: "{{ pod_name }}-1" + spec: + containers: + - name: python + image: python:3.7-alpine + command: + - /bin/sh + - -c + - while true; do echo $(date); sleep 15; done + imagePullPolicy: IfNotPresent + register: result + + - name: 
assert user is still authorize to create pod + assert: + that: + - result is successful + + - name: Prune role using name + community.okd.openshift_adm_prune_auth: + resource: roles + namespace: "{{ test_ns }}" + name: "{{ role_definition[1].name }}" + register: prune + + - name: assert that role binding 'create' was pruned + assert: + that: + - prune is changed + - '"{{ test_ns }}/{{ role_definition[1].name }}-bind" in check.role_binding' + + - name: Create Pod (should failed) + kubernetes.core.k8s: + api_key: "{{ api_token }}" + host: "{{ cluster_host }}" + validate_certs: no + namespace: "{{ test_ns }}" + definition: + kind: Pod + metadata: + name: "{{ pod_name }}-2" + spec: + containers: + - name: python + image: python:3.7-alpine + command: + - /bin/sh + - -c + - while true; do echo $(date); sleep 15; done + imagePullPolicy: IfNotPresent + register: result + ignore_errors: true + + - name: assert user is not authorize to create pod anymore + assert: + that: + - '"forbidden: User" in error.msg' + + - name: List Pod + kubernetes.core.k8s_info: + api_key: "{{ api_token }}" + host: "{{ cluster_host }}" + validate_certs: no + namespace: "{{ test_ns }}" + kind: Pod + register: result + + - name: assert user is still able to list pods + assert: + that: + - result is successful + + - name: Prune all role for namespace (neither name nor label_selectors are specified) + community.okd.openshift_adm_prune_auth: + resource: roles + namespace: "{{ test_ns }}" + register: prune + + - name: assert that role binding 'list' was pruned + assert: + that: + - prune is changed + - '"{{ test_ns }}/{{ role_definition[0].name }}-bind" in check.role_binding' + + - name: List Pod + kubernetes.core.k8s_info: + api_key: "{{ api_token }}" + host: "{{ cluster_host }}" + validate_certs: no + namespace: "{{ test_ns }}" + kind: Pod + register: result + ignore_errors: true + + - name: assert user is not authorize to list pod anymore + assert: + that: + - '"forbidden: User" in error.msg' always: - name: Ensure namespace is deleted diff --git a/ansible_collections/community/okd/molecule/default/tasks/openshift_adm_prune_deployments.yml b/ansible_collections/community/okd/molecule/default/tasks/openshift_adm_prune_deployments.yml index baa024188..ca48f7327 100644 --- a/ansible_collections/community/okd/molecule/default/tasks/openshift_adm_prune_deployments.yml +++ b/ansible_collections/community/okd/molecule/default/tasks/openshift_adm_prune_deployments.yml @@ -1,255 +1,255 @@ +--- - name: Prune deployments block: - - set_fact: - dc_name: "hello" - deployment_ns: "prune-deployments" - deployment_ns_2: "prune-deployments-2" - + - set_fact: + dc_name: "hello" + deployment_ns: "prune-deployments" + deployment_ns_2: "prune-deployments-2" - - name: Ensure namespace - community.okd.k8s: - kind: Namespace - name: '{{ deployment_ns }}' + - name: Ensure namespace + community.okd.k8s: + kind: Namespace + name: '{{ deployment_ns }}' - - name: Create deployment config - community.okd.k8s: - namespace: '{{ deployment_ns }}' - definition: - kind: DeploymentConfig - apiVersion: apps.openshift.io/v1 - metadata: - name: '{{ dc_name }}' - spec: - replicas: 1 - selector: + - name: Create deployment config + community.okd.k8s: + namespace: '{{ deployment_ns }}' + definition: + kind: DeploymentConfig + apiVersion: apps.openshift.io/v1 + metadata: name: '{{ dc_name }}' - template: - metadata: - labels: - name: '{{ dc_name }}' - spec: - containers: - - name: hello-openshift - imagePullPolicy: IfNotPresent - image: python:3.7-alpine - command: [ 
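openshift_adm_prune_deployments only treats a ReplicationController as a candidate once its deployment has finished (an openshift.io/deployment.phase of Complete or Failed) and it has been scaled to zero replicas, which is why the play first creates a running DeploymentConfig, expects no candidates, and only then scales it down. A minimal invocation, assuming the namespace used above:

    - name: Prune finished deployments in one namespace
      community.okd.openshift_adm_prune_deployments:
        namespace: prune-deployments
      register: pruned

    - name: Show what was removed
      ansible.builtin.debug:
        var: pruned.replication_controllers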
"/bin/sh", "-c", "while true;do date;sleep 2s; done"] - wait: yes + spec: + replicas: 1 + selector: + name: '{{ dc_name }}' + template: + metadata: + labels: + name: '{{ dc_name }}' + spec: + containers: + - name: hello-openshift + imagePullPolicy: IfNotPresent + image: python:3.7-alpine + command: [ "/bin/sh", "-c", "while true;do date;sleep 2s; done"] + wait: yes - - name: prune deployments (no candidate DeploymentConfig) - community.okd.openshift_adm_prune_deployments: - namespace: "{{ deployment_ns }}" - register: test_prune + - name: prune deployments (no candidate DeploymentConfig) + community.okd.openshift_adm_prune_deployments: + namespace: "{{ deployment_ns }}" + register: test_prune - - assert: - that: - - test_prune is not changed - - test_prune.replication_controllers | length == 0 + - assert: + that: + - test_prune is not changed + - test_prune.replication_controllers | length == 0 - - name: Update DeploymentConfig - set replicas to 0 - community.okd.k8s: - namespace: "{{ deployment_ns }}" - definition: - kind: DeploymentConfig - apiVersion: "apps.openshift.io/v1" - metadata: - name: "{{ dc_name }}" - spec: - replicas: 0 - selector: + - name: Update DeploymentConfig - set replicas to 0 + community.okd.k8s: + namespace: "{{ deployment_ns }}" + definition: + kind: DeploymentConfig + apiVersion: "apps.openshift.io/v1" + metadata: name: "{{ dc_name }}" - template: - metadata: - labels: - name: "{{ dc_name }}" - spec: - containers: - - name: hello-openshift - imagePullPolicy: IfNotPresent - image: python:3.7-alpine - command: [ "/bin/sh", "-c", "while true;do date;sleep 2s; done"] - wait: yes + spec: + replicas: 0 + selector: + name: "{{ dc_name }}" + template: + metadata: + labels: + name: "{{ dc_name }}" + spec: + containers: + - name: hello-openshift + imagePullPolicy: IfNotPresent + image: python:3.7-alpine + command: [ "/bin/sh", "-c", "while true;do date;sleep 2s; done"] + wait: yes - - name: Wait for ReplicationController candidate for pruning - kubernetes.core.k8s_info: - kind: ReplicationController - namespace: "{{ deployment_ns }}" - register: result - retries: 10 - delay: 30 - until: - - result.resources.0.metadata.annotations["openshift.io/deployment.phase"] in ("Failed", "Complete") + - name: Wait for ReplicationController candidate for pruning + kubernetes.core.k8s_info: + kind: ReplicationController + namespace: "{{ deployment_ns }}" + register: result + retries: 10 + delay: 30 + until: + - result.resources.0.metadata.annotations["openshift.io/deployment.phase"] in ("Failed", "Complete") - - name: Prune deployments - should delete 1 ReplicationController - community.okd.openshift_adm_prune_deployments: - namespace: "{{ deployment_ns }}" - check_mode: yes - register: test_prune + - name: Prune deployments - should delete 1 ReplicationController + community.okd.openshift_adm_prune_deployments: + namespace: "{{ deployment_ns }}" + check_mode: yes + register: test_prune - - name: Read ReplicationController - kubernetes.core.k8s_info: - kind: ReplicationController - namespace: "{{ deployment_ns }}" - register: replications + - name: Read ReplicationController + kubernetes.core.k8s_info: + kind: ReplicationController + namespace: "{{ deployment_ns }}" + register: replications - - name: Assert that Replication controller was not deleted - assert: - that: - - replications.resources | length == 1 - - 'replications.resources.0.metadata.name is match("{{ dc_name }}-*")' + - name: Assert that Replication controller was not deleted + assert: + that: + - replications.resources | 
length == 1 + - 'replications.resources.0.metadata.name is match("{{ dc_name }}-*")' - - name: Assure that candidate ReplicationController was found for pruning - assert: - that: - - test_prune is changed - - test_prune.replication_controllers | length == 1 - - test_prune.replication_controllers.0.metadata.name == replications.resources.0.metadata.name - - test_prune.replication_controllers.0.metadata.namespace == replications.resources.0.metadata.namespace + - name: Assure that candidate ReplicationController was found for pruning + assert: + that: + - test_prune is changed + - test_prune.replication_controllers | length == 1 + - test_prune.replication_controllers.0.metadata.name == replications.resources.0.metadata.name + - test_prune.replication_controllers.0.metadata.namespace == replications.resources.0.metadata.namespace - - name: Prune deployments - keep younger than 45min (check_mode) - community.okd.openshift_adm_prune_deployments: - keep_younger_than: 45 - namespace: "{{ deployment_ns }}" - check_mode: true - register: keep_younger + - name: Prune deployments - keep younger than 45min (check_mode) + community.okd.openshift_adm_prune_deployments: + keep_younger_than: 45 + namespace: "{{ deployment_ns }}" + check_mode: true + register: keep_younger - - name: assert no candidate was found - assert: - that: - - keep_younger is not changed - - keep_younger.replication_controllers == [] + - name: assert no candidate was found + assert: + that: + - keep_younger is not changed + - keep_younger.replication_controllers == [] - - name: Ensure second namespace is created - community.okd.k8s: - kind: Namespace - name: '{{ deployment_ns_2 }}' + - name: Ensure second namespace is created + community.okd.k8s: + kind: Namespace + name: '{{ deployment_ns_2 }}' - - name: Create deployment config from 2nd namespace - community.okd.k8s: - namespace: '{{ deployment_ns_2 }}' - definition: - kind: DeploymentConfig - apiVersion: apps.openshift.io/v1 - metadata: - name: '{{ dc_name }}2' - spec: - replicas: 1 - selector: + - name: Create deployment config from 2nd namespace + community.okd.k8s: + namespace: '{{ deployment_ns_2 }}' + definition: + kind: DeploymentConfig + apiVersion: apps.openshift.io/v1 + metadata: name: '{{ dc_name }}2' - template: - metadata: - labels: - name: '{{ dc_name }}2' - spec: - containers: - - name: hello-openshift - imagePullPolicy: IfNotPresent - image: python:3.7-alpine - command: [ "/bin/sh", "-c", "while true;do date;sleep 2s; done"] - wait: yes + spec: + replicas: 1 + selector: + name: '{{ dc_name }}2' + template: + metadata: + labels: + name: '{{ dc_name }}2' + spec: + containers: + - name: hello-openshift + imagePullPolicy: IfNotPresent + image: python:3.7-alpine + command: [ "/bin/sh", "-c", "while true;do date;sleep 2s; done"] + wait: yes - - name: Stop deployment config - replicas = 0 - community.okd.k8s: - namespace: '{{ deployment_ns_2 }}' - definition: - kind: DeploymentConfig - apiVersion: apps.openshift.io/v1 - metadata: - name: '{{ dc_name }}2' - spec: - replicas: 0 - selector: + - name: Stop deployment config - replicas = 0 + community.okd.k8s: + namespace: '{{ deployment_ns_2 }}' + definition: + kind: DeploymentConfig + apiVersion: apps.openshift.io/v1 + metadata: name: '{{ dc_name }}2' - template: - metadata: - labels: - name: '{{ dc_name }}2' - spec: - containers: - - name: hello-openshift - imagePullPolicy: IfNotPresent - image: python:3.7-alpine - command: [ "/bin/sh", "-c", "while true;do date;sleep 2s; done"] - wait: yes + spec: + replicas: 0 + selector: 
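keep_younger_than is a safety window expressed in minutes: any ReplicationController newer than the window is kept even when it is otherwise prunable, which is why the 45-minute dry run above finds nothing for an RC created moments earlier. A sketch with an illustrative one-hour window:

    - name: Dry-run prune while protecting anything newer than an hour
      community.okd.openshift_adm_prune_deployments:
        namespace: prune-deployments
        keep_younger_than: 60
      check_mode: true
      register: protected

    - name: Nothing this fresh should be a candidate
      ansible.builtin.assert:
        that:
          - protected.replication_controllers == []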
+ name: '{{ dc_name }}2' + template: + metadata: + labels: + name: '{{ dc_name }}2' + spec: + containers: + - name: hello-openshift + imagePullPolicy: IfNotPresent + image: python:3.7-alpine + command: [ "/bin/sh", "-c", "while true;do date;sleep 2s; done"] + wait: yes + + - name: Wait for ReplicationController candidate for pruning + kubernetes.core.k8s_info: + kind: ReplicationController + namespace: "{{ deployment_ns_2 }}" + register: result + retries: 10 + delay: 30 + until: + - result.resources.0.metadata.annotations["openshift.io/deployment.phase"] in ("Failed", "Complete") - - name: Wait for ReplicationController candidate for pruning - kubernetes.core.k8s_info: - kind: ReplicationController - namespace: "{{ deployment_ns_2 }}" - register: result - retries: 10 - delay: 30 - until: - - result.resources.0.metadata.annotations["openshift.io/deployment.phase"] in ("Failed", "Complete") + # Prune from one namespace should not have any effect on others namespaces + - name: Prune deployments from 2nd namespace + community.okd.openshift_adm_prune_deployments: + namespace: "{{ deployment_ns_2 }}" + check_mode: yes + register: test_prune - # Prune from one namespace should not have any effect on others namespaces - - name: Prune deployments from 2nd namespace - community.okd.openshift_adm_prune_deployments: - namespace: "{{ deployment_ns_2 }}" - check_mode: yes - register: test_prune + - name: Assure that candidate ReplicationController was found for pruning + assert: + that: + - test_prune is changed + - test_prune.replication_controllers | length == 1 + - "test_prune.replication_controllers.0.metadata.namespace == deployment_ns_2" - - name: Assure that candidate ReplicationController was found for pruning - assert: - that: - - test_prune is changed - - test_prune.replication_controllers | length == 1 - - "test_prune.replication_controllers.0.metadata.namespace == deployment_ns_2" + # Prune without namespace option + - name: Prune from all namespace should update more deployments + community.okd.openshift_adm_prune_deployments: + check_mode: yes + register: no_namespace_prune - # Prune without namespace option - - name: Prune from all namespace should update more deployments - community.okd.openshift_adm_prune_deployments: - check_mode: yes - register: no_namespace_prune + - name: Assure multiple ReplicationController were found for pruning + assert: + that: + - no_namespace_prune is changed + - no_namespace_prune.replication_controllers | length == 2 - - name: Assure multiple ReplicationController were found for pruning - assert: - that: - - no_namespace_prune is changed - - no_namespace_prune.replication_controllers | length == 2 - - # Execute Prune from 2nd namespace - - name: Read ReplicationController before Prune operation - kubernetes.core.k8s_info: - kind: ReplicationController - namespace: "{{ deployment_ns_2 }}" - register: replications + # Execute Prune from 2nd namespace + - name: Read ReplicationController before Prune operation + kubernetes.core.k8s_info: + kind: ReplicationController + namespace: "{{ deployment_ns_2 }}" + register: replications - - assert: - that: - - replications.resources | length == 1 + - assert: + that: + - replications.resources | length == 1 - - name: Prune DeploymentConfig from 2nd namespace - community.okd.openshift_adm_prune_deployments: - namespace: "{{ deployment_ns_2 }}" - register: _prune + - name: Prune DeploymentConfig from 2nd namespace + community.okd.openshift_adm_prune_deployments: + namespace: "{{ deployment_ns_2 }}" + register: _prune - - 
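Omitting the namespace option widens the scan to every namespace the credentials can see, which the test uses to show candidates from both test namespaces at once; running that for real is best preceded by the same check_mode preview. A hedged sketch:

    - name: Preview a cluster-wide prune before deleting anything
      community.okd.openshift_adm_prune_deployments:
      check_mode: true
      register: everywhere

    - name: Expect at least one candidate across all namespaces
      ansible.builtin.assert:
        that:
          - everywhere.replication_controllers | length >= 1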
name: Assert DeploymentConfig was deleted - assert: - that: - - _prune is changed - - _prune.replication_controllers | length == 1 - - _prune.replication_controllers.0.details.name == replications.resources.0.metadata.name + - name: Assert DeploymentConfig was deleted + assert: + that: + - _prune is changed + - _prune.replication_controllers | length == 1 + - _prune.replication_controllers.0.details.name == replications.resources.0.metadata.name - # Execute Prune without namespace option - - name: Read ReplicationController before Prune operation - kubernetes.core.k8s_info: - kind: ReplicationController - namespace: "{{ deployment_ns }}" - register: replications + # Execute Prune without namespace option + - name: Read ReplicationController before Prune operation + kubernetes.core.k8s_info: + kind: ReplicationController + namespace: "{{ deployment_ns }}" + register: replications - - assert: - that: - - replications.resources | length == 1 + - assert: + that: + - replications.resources | length == 1 - - name: Prune from all namespace should update more deployments - community.okd.openshift_adm_prune_deployments: - register: _prune + - name: Prune from all namespace should update more deployments + community.okd.openshift_adm_prune_deployments: + register: _prune - - name: Assure multiple ReplicationController were found for pruning - assert: - that: - - _prune is changed - - _prune.replication_controllers | length > 0 + - name: Assure multiple ReplicationController were found for pruning + assert: + that: + - _prune is changed + - _prune.replication_controllers | length > 0 always: - name: Delete 1st namespace diff --git a/ansible_collections/community/okd/molecule/default/tasks/openshift_builds.yml b/ansible_collections/community/okd/molecule/default/tasks/openshift_builds.yml index b564f8bcd..2acbac45e 100644 --- a/ansible_collections/community/okd/molecule/default/tasks/openshift_builds.yml +++ b/ansible_collections/community/okd/molecule/default/tasks/openshift_builds.yml @@ -1,240 +1,245 @@ +--- - block: - - set_fact: - build_ns: "builds" - build_config: "start-build" - is_name: "ruby" - prune_build: "prune-build" - - - name: Ensure namespace - kubernetes.core.k8s: - kind: Namespace - name: "{{ build_ns }}" - - - name: Create ImageStream - community.okd.k8s: - namespace: "{{ build_ns }}" - definition: - apiVersion: image.openshift.io/v1 - kind: ImageStream - metadata: - name: "{{ is_name }}" - spec: - lookupPolicy: - local: false - tags: [] - - - name: Create build configuration - community.okd.k8s: - namespace: "{{ build_ns }}" - definition: - kind: BuildConfig - apiVersion: build.openshift.io/v1 - metadata: - name: "{{ build_config }}" - spec: - source: - dockerfile: | - FROM openshift/ruby-22-centos7 - RUN sleep 60s - USER ansible - strategy: - type: Docker - output: - to: - kind: "ImageStreamTag" - name: "{{ is_name }}:latest" - - - name: Start Build from Build configuration - community.okd.openshift_build: - namespace: "{{ build_ns }}" - build_config_name: "{{ build_config }}" - register: new_build - - - name: Assert that a build has been created - assert: - that: - - new_build is changed - - new_build.builds.0.metadata.name == "{{ build_config }}-1" - - - name: Start a new Build from previous Build - community.okd.openshift_build: - namespace: "{{ build_ns }}" - build_name: "{{ new_build.builds.0.metadata.name }}" - register: rerun_build - - - name: Assert that another build has been created - assert: - that: - - rerun_build is changed - - rerun_build.builds.0.metadata.name == 
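community.okd.openshift_build starts builds in two ways: build_config_name instantiates a new build from the BuildConfig (named <config>-<n> sequentially), while build_name re-runs an existing Build. A sketch of the first form, reusing the test's names:

    - name: Kick off a fresh build from the BuildConfig
      community.okd.openshift_build:
        namespace: builds
        build_config_name: start-build
      register: fresh

    - name: One new build should be reported
      ansible.builtin.assert:
        that:
          - fresh is changed
          - fresh.builds | length == 1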
"{{ build_config }}-2" - - - name: Cancel first build created - community.okd.openshift_build: - namespace: "{{ build_ns }}" - build_name: "{{ build_config }}-1" - state: cancelled - wait: yes - register: cancel - - - name: Assert that the Build was cancelled - assert: - that: - - cancel is changed - - cancel.builds | length == 1 - - cancel.builds.0.metadata.name == "{{ build_config }}-1" - - cancel.builds.0.metadata.namespace == "{{ build_ns }}" - - cancel.builds.0.status.cancelled - - - name: Get Build info - kubernetes.core.k8s_info: - version: build.openshift.io/v1 - kind: Build - namespace: "{{ build_ns }}" - name: "{{ cancel.builds.0.metadata.name }}" - register: build - - - name: Assert that build phase is cancelled - assert: - that: - - build.resources | length == 1 - - build.resources.0.status.cancelled - - build.resources.0.status.phase == 'Cancelled' - - - name: Cancel and restart Build using build config name - community.okd.openshift_build: - namespace: "{{ build_ns }}" - build_config_name: "{{ build_config }}" - state: restarted - build_phases: - - Running - - New - register: restart - - - name: assert that new build was created - assert: - that: - - restart is changed - - restart.builds | length == 1 - - 'restart.builds.0.metadata.name == "{{ build_config }}-3"' - - - name: Get Build 2 info - kubernetes.core.k8s_info: - version: build.openshift.io/v1 - kind: Build - namespace: "{{ build_ns }}" - name: "{{ build_config }}-2" - register: build - - - name: Assert that build phase is cancelled - assert: - that: - - build.resources | length == 1 - - build.resources.0.status.cancelled - - build.resources.0.status.phase == 'Cancelled' - - - name: Get Build info - kubernetes.core.k8s_info: - version: build.openshift.io/v1 - kind: Build - namespace: "{{ build_ns }}" - name: "{{ build_config }}-3" - register: build - - - name: Assert that Build is not cancelled - assert: - that: - - build.resources | length == 1 - - '"cancelled" not in build.resources.0.status' - - "build.resources.0.status.phase in ('New', 'Pending', 'Running')" - - - name: Prune Builds keep younger than 30min - community.okd.openshift_adm_prune_builds: - keep_younger_than: 30 - namespace: "{{ build_ns }}" - register: prune - check_mode: yes - - - name: Assert that no Builds were found - assert: - that: - - not prune.changed - - prune.builds | length == 0 - - - name: Prune Builds without namespace - community.okd.openshift_adm_prune_builds: - register: prune_without_ns - check_mode: yes - - - name: Assert that completed build are candidate for prune - assert: - that: - - prune_without_ns is changed - - prune_without_ns.builds | length > 0 - - '"{{ build_config }}-1" in build_names' - - '"{{ build_config }}-2" in build_names' - vars: - build_names: '{{ prune_without_ns.builds | map(attribute="metadata") | flatten | map(attribute="name") | list }}' - - - name: Prune Builds using namespace - community.okd.openshift_adm_prune_builds: - namespace: "{{ build_ns }}" - register: prune_with_ns - check_mode: yes - - - name: Assert that prune operation found the completed build - assert: - that: - - prune_with_ns is changed - - prune_with_ns.builds | length == 2 - - - name: Check Build before prune - kubernetes.core.k8s_info: - kind: Build - api_version: build.openshift.io/v1 - name: "{{ build_config }}-1" - namespace: "{{ build_ns }}" - register: resource - - - name: Validate that any previous build operation executed with check_mode did not deleted the build - assert: - that: - - resource.resources | length == 1 - - - name: 
Execute prune operation - community.okd.openshift_adm_prune_builds: - namespace: "{{ build_ns }}" - register: prune - - - name: assert prune is changed - assert: - that: - - prune is changed - - - name: Check Build - kubernetes.core.k8s_info: - kind: Build - api_version: build.openshift.io/v1 - name: "{{ build_config }}-1" - namespace: "{{ build_ns }}" - register: resource - - - name: Assert that the Build does not exist anymore - assert: - that: - - resource.resources | length == 0 - - - name: Check Build - kubernetes.core.k8s_info: - kind: Build - api_version: build.openshift.io/v1 - name: "{{ build_config }}-2" - namespace: "{{ build_ns }}" - register: resource - - - name: Assert that the Build does not exist anymore - assert: - that: - - resource.resources | length == 0 + - set_fact: + build_ns: "builds" + build_config: "start-build" + is_name: "ruby" + prune_build: "prune-build" + + - name: Ensure namespace + kubernetes.core.k8s: + kind: Namespace + name: "{{ build_ns }}" + + - name: Create ImageStream + community.okd.k8s: + namespace: "{{ build_ns }}" + definition: + apiVersion: image.openshift.io/v1 + kind: ImageStream + metadata: + name: "{{ is_name }}" + spec: + lookupPolicy: + local: false + tags: [] + + - name: Create build configuration + community.okd.k8s: + namespace: "{{ build_ns }}" + definition: + kind: BuildConfig + apiVersion: build.openshift.io/v1 + metadata: + name: "{{ build_config }}" + spec: + source: + dockerfile: | + FROM openshift/ruby-22-centos7 + RUN sleep 60s + USER ansible + strategy: + type: Docker + output: + to: + kind: "ImageStreamTag" + name: "{{ is_name }}:latest" + + - name: Start Build from Build configuration + community.okd.openshift_build: + namespace: "{{ build_ns }}" + build_config_name: "{{ build_config }}" + register: new_build + + - name: Assert that a build has been created + assert: + that: + - new_build is changed + - new_build.builds.0.metadata.name == "{{ build_config }}-1" + + - name: Start a new Build from previous Build + community.okd.openshift_build: + namespace: "{{ build_ns }}" + build_name: "{{ new_build.builds.0.metadata.name }}" + register: rerun_build + + - name: Assert that another build has been created + assert: + that: + - rerun_build is changed + - rerun_build.builds.0.metadata.name == "{{ build_config }}-2" + + - name: Cancel first build created + community.okd.openshift_build: + namespace: "{{ build_ns }}" + build_name: "{{ build_config }}-1" + state: cancelled + wait: yes + register: cancel + + - name: Assert that the Build was cancelled + assert: + that: + - cancel is changed + - cancel.builds | length == 1 + - cancel.builds.0.metadata.name == "{{ build_config }}-1" + - cancel.builds.0.metadata.namespace == "{{ build_ns }}" + - '"cancelled" in cancel.builds.0.status' + - cancel.builds.0.status.cancelled + + - name: Get info for 1st Build + kubernetes.core.k8s_info: + version: build.openshift.io/v1 + kind: Build + namespace: "{{ build_ns }}" + name: "{{ cancel.builds.0.metadata.name }}" + register: build + + - name: Assert that build phase is cancelled + assert: + that: + - build.resources | length == 1 + - '"cancelled" in build.resources.0.status' + - build.resources.0.status.cancelled + - build.resources.0.status.phase == 'Cancelled' + + - name: Cancel and restart Build using build config name + community.okd.openshift_build: + namespace: "{{ build_ns }}" + build_config_name: "{{ build_config }}" + state: restarted + build_phases: + - Pending + - Running + - New + register: restart + + - name: assert that new build 
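state: restarted combined with build_phases cancels only builds currently in the listed phases and immediately starts a replacement, so listing New, Pending and Running (as the updated test does) covers every build that has not yet finished. A hedged sketch:

    - name: Cancel any in-flight build and start a new one in its place
      community.okd.openshift_build:
        namespace: builds
        build_config_name: start-build
        state: restarted
        build_phases:
          - New
          - Pending
          - Running
      register: restarted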
was created + assert: + that: + - restart is changed + - restart.builds | length == 1 + - 'restart.builds.0.metadata.name == "{{ build_config }}-3"' + + - name: Get info for 2nd Build + kubernetes.core.k8s_info: + version: build.openshift.io/v1 + kind: Build + namespace: "{{ build_ns }}" + name: "{{ build_config }}-2" + register: build + + - name: Assert that build phase is cancelled + assert: + that: + - build.resources | length == 1 + - '"cancelled" in build.resources.0.status' + - build.resources.0.status.cancelled + - build.resources.0.status.phase == 'Cancelled' + + - name: Get info for 3rd build + kubernetes.core.k8s_info: + version: build.openshift.io/v1 + kind: Build + namespace: "{{ build_ns }}" + name: "{{ build_config }}-3" + register: build + + - name: Assert that Build is not cancelled + assert: + that: + - build.resources | length == 1 + - '"cancelled" not in build.resources.0.status' + - "build.resources.0.status.phase in ('New', 'Pending', 'Running')" + + - name: Prune Builds keep younger than 30min + community.okd.openshift_adm_prune_builds: + keep_younger_than: 30 + namespace: "{{ build_ns }}" + register: prune + check_mode: yes + + - name: Assert that no Builds were found + assert: + that: + - not prune.changed + - prune.builds | length == 0 + + - name: Prune Builds without namespace + community.okd.openshift_adm_prune_builds: + register: prune_without_ns + check_mode: yes + + - name: Assert that completed build are candidate for prune + assert: + that: + - prune_without_ns is changed + - prune_without_ns.builds | length > 0 + - '"{{ build_config }}-1" in build_names' + - '"{{ build_config }}-2" in build_names' + vars: + build_names: '{{ prune_without_ns.builds | map(attribute="metadata") | flatten | map(attribute="name") | list }}' + + - name: Prune Builds using namespace + community.okd.openshift_adm_prune_builds: + namespace: "{{ build_ns }}" + register: prune_with_ns + check_mode: yes + + - name: Assert that prune operation found the completed build + assert: + that: + - prune_with_ns is changed + - prune_with_ns.builds | length == 2 + + - name: Check Build before prune + kubernetes.core.k8s_info: + kind: Build + api_version: build.openshift.io/v1 + name: "{{ build_config }}-1" + namespace: "{{ build_ns }}" + register: resource + + - name: Validate that any previous build operation executed with check_mode did not deleted the build + assert: + that: + - resource.resources | length == 1 + + - name: Execute prune operation + community.okd.openshift_adm_prune_builds: + namespace: "{{ build_ns }}" + register: prune + + - name: assert prune is changed + assert: + that: + - prune is changed + + - name: Check Build + kubernetes.core.k8s_info: + kind: Build + api_version: build.openshift.io/v1 + name: "{{ build_config }}-1" + namespace: "{{ build_ns }}" + register: resource + + - name: Assert that the Build does not exist anymore + assert: + that: + - resource.resources | length == 0 + + - name: Check Build + kubernetes.core.k8s_info: + kind: Build + api_version: build.openshift.io/v1 + name: "{{ build_config }}-2" + namespace: "{{ build_ns }}" + register: resource + + - name: Assert that the Build does not exist anymore + assert: + that: + - resource.resources | length == 0 always: - name: Ensure namespace is deleted diff --git a/ansible_collections/community/okd/molecule/default/tasks/openshift_import_images.yml b/ansible_collections/community/okd/molecule/default/tasks/openshift_import_images.yml index 04392bb26..76c49f0a1 100644 --- 
a/ansible_collections/community/okd/molecule/default/tasks/openshift_import_images.yml +++ b/ansible_collections/community/okd/molecule/default/tasks/openshift_import_images.yml @@ -1,174 +1,175 @@ +--- - name: Openshift import image testing block: - - set_fact: - test_ns: "import-images" - - - name: Ensure namespace - community.okd.k8s: - kind: Namespace - name: '{{ test_ns }}' - - - name: Import image using tag (should import latest tag only) - community.okd.openshift_import_image: - namespace: "{{ test_ns }}" - name: "ansible/awx" - check_mode: yes - register: import_tag - - - name: Assert only latest was imported - assert: - that: - - import_tag is changed - - import_tag.result | length == 1 - - import_tag.result.0.spec.import - - import_tag.result.0.spec.images.0.from.kind == "DockerImage" - - import_tag.result.0.spec.images.0.from.name == "ansible/awx" - - - name: check image stream - kubernetes.core.k8s_info: - kind: ImageStream - namespace: "{{ test_ns }}" - name: awx - register: resource - - - name: assert that image stream is not created when using check_mode=yes - assert: - that: - - resource.resources == [] - - - name: Import image using tag (should import latest tag only) - community.okd.openshift_import_image: - namespace: "{{ test_ns }}" - name: "ansible/awx" - register: import_tag - - - name: Assert only latest was imported - assert: - that: - - import_tag is changed - - - name: check image stream - kubernetes.core.k8s_info: - kind: ImageStream - namespace: "{{ test_ns }}" - name: awx - register: resource - - - name: assert that image stream contains only tag latest - assert: - that: - - resource.resources | length == 1 - - resource.resources.0.status.tags.0.tag == 'latest' - - - name: Import once again the latest tag - community.okd.openshift_import_image: - namespace: "{{ test_ns }}" - name: "ansible/awx" - register: import_tag - - - name: assert change was performed - assert: - that: - - import_tag is changed - - - name: check image stream - kubernetes.core.k8s_info: - kind: ImageStream - version: image.openshift.io/v1 - namespace: "{{ test_ns }}" - name: awx - register: resource - - - name: assert that image stream still contains unique tag - assert: - that: - - resource.resources | length == 1 - - resource.resources.0.status.tags.0.tag == 'latest' - - - name: Import another tags - community.okd.openshift_import_image: - namespace: "{{ test_ns }}" - name: "ansible/awx:17.1.0" - register: import_another_tag - ignore_errors: yes - - - name: assert that another tag was imported - assert: - that: - - import_another_tag is failed - - '"the tag 17.1.0 does not exist on the image stream" in import_another_tag.msg' - - - name: Create simple ImageStream (without docker external container) - community.okd.k8s: - namespace: "{{ test_ns }}" - name: "local-is" - definition: - apiVersion: image.openshift.io/v1 + - set_fact: + test_ns: "import-images" + + - name: Ensure namespace + community.okd.k8s: + kind: Namespace + name: '{{ test_ns }}' + + - name: Import image using tag (should import latest tag only) + community.okd.openshift_import_image: + namespace: "{{ test_ns }}" + name: "ansible/awx" + check_mode: yes + register: import_tag + + - name: Assert only latest was imported + assert: + that: + - import_tag is changed + - import_tag.result | length == 1 + - import_tag.result.0.spec.import + - import_tag.result.0.spec.images.0.from.kind == "DockerImage" + - import_tag.result.0.spec.images.0.from.name == "ansible/awx" + + - name: check image stream + kubernetes.core.k8s_info: + 
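openshift_import_image mirrors oc import-image: a bare repository name imports only the latest tag into an ImageStream named after the repository, name:tag imports that single tag but fails when the stream does not already track it, and all: true pulls every tag. A sketch of the two successful forms, using the same public images as the test:

    - name: Import only the latest tag of ansible/awx
      community.okd.openshift_import_image:
        namespace: import-images
        name: ansible/awx

    - name: Import every tag of ibmcom/pause
      community.okd.openshift_import_image:
        namespace: import-images
        name: ibmcom/pause
        all: true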
kind: ImageStream + namespace: "{{ test_ns }}" + name: awx + register: resource + + - name: assert that image stream is not created when using check_mode=yes + assert: + that: + - resource.resources == [] + + - name: Import image using tag (should import latest tag only) + community.okd.openshift_import_image: + namespace: "{{ test_ns }}" + name: "ansible/awx" + register: import_tag + + - name: Assert only latest was imported + assert: + that: + - import_tag is changed + + - name: check image stream + kubernetes.core.k8s_info: + kind: ImageStream + namespace: "{{ test_ns }}" + name: awx + register: resource + + - name: assert that image stream contains only tag latest + assert: + that: + - resource.resources | length == 1 + - resource.resources.0.status.tags.0.tag == 'latest' + + - name: Import once again the latest tag + community.okd.openshift_import_image: + namespace: "{{ test_ns }}" + name: "ansible/awx" + register: import_tag + + - name: assert change was performed + assert: + that: + - import_tag is changed + + - name: check image stream + kubernetes.core.k8s_info: + kind: ImageStream + version: image.openshift.io/v1 + namespace: "{{ test_ns }}" + name: awx + register: resource + + - name: assert that image stream still contains unique tag + assert: + that: + - resource.resources | length == 1 + - resource.resources.0.status.tags.0.tag == 'latest' + + - name: Import another tags + community.okd.openshift_import_image: + namespace: "{{ test_ns }}" + name: "ansible/awx:17.1.0" + register: import_another_tag + ignore_errors: yes + + - name: assert that another tag was imported + assert: + that: + - import_another_tag is failed + - '"the tag 17.1.0 does not exist on the image stream" in import_another_tag.msg' + + - name: Create simple ImageStream (without docker external container) + community.okd.k8s: + namespace: "{{ test_ns }}" + name: "local-is" + definition: + apiVersion: image.openshift.io/v1 + kind: ImageStream + spec: + lookupPolicy: + local: false + tags: [] + + - name: Import all tag for image stream not pointing on external container image should failed + community.okd.openshift_import_image: + namespace: "{{ test_ns }}" + name: "local-is" + all: true + register: error_tag + ignore_errors: true + check_mode: yes + + - name: Assert module cannot import from non-existing tag from ImageStream + assert: + that: + - error_tag is failed + - 'error_tag.msg == "image stream {{ test_ns }}/local-is does not have tags pointing to external container images"' + + - name: import all tags for container image ibmcom/pause and specific tag for redhat/ubi8-micro + community.okd.openshift_import_image: + namespace: "{{ test_ns }}" + name: + - "ibmcom/pause" + - "redhat/ubi8-micro:8.5-437" + all: true + register: multiple_import + + - name: Assert that import succeed + assert: + that: + - multiple_import is changed + - multiple_import.result | length == 2 + + - name: Read ibmcom/pause ImageStream + kubernetes.core.k8s_info: + version: image.openshift.io/v1 + kind: ImageStream + namespace: "{{ test_ns }}" + name: pause + register: pause + + - name: assert that ibmcom/pause has multiple tags + assert: + that: + - pause.resources | length == 1 + - pause.resources.0.status.tags | length > 1 + + - name: Read redhat/ubi8-micro ImageStream + kubernetes.core.k8s_info: + version: image.openshift.io/v1 kind: ImageStream - spec: - lookupPolicy: - local: false - tags: [] - - - name: Import all tag for image stream not pointing on external container image should failed - 
community.okd.openshift_import_image: - namespace: "{{ test_ns }}" - name: "local-is" - all: true - register: error_tag - ignore_errors: true - check_mode: yes - - - name: Assert module cannot import from non-existing tag from ImageStream - assert: - that: - - error_tag is failed - - 'error_tag.msg == "image stream {{ test_ns }}/local-is does not have tags pointing to external container images"' - - - name: import all tags for container image ibmcom/pause and specific tag for redhat/ubi8-micro - community.okd.openshift_import_image: - namespace: "{{ test_ns }}" - name: - - "ibmcom/pause" - - "redhat/ubi8-micro:8.5-437" - all: true - register: multiple_import - - - name: Assert that import succeed - assert: - that: - - multiple_import is changed - - multiple_import.result | length == 2 - - - name: Read ibmcom/pause ImageStream - kubernetes.core.k8s_info: - version: image.openshift.io/v1 - kind: ImageStream - namespace: "{{ test_ns }}" - name: pause - register: pause - - - name: assert that ibmcom/pause has multiple tags - assert: - that: - - pause.resources | length == 1 - - pause.resources.0.status.tags | length > 1 - - - name: Read redhat/ubi8-micro ImageStream - kubernetes.core.k8s_info: - version: image.openshift.io/v1 - kind: ImageStream - namespace: "{{ test_ns }}" - name: ubi8-micro - register: resource - - - name: assert that redhat/ubi8-micro has only one tag - assert: - that: - - resource.resources | length == 1 - - resource.resources.0.status.tags | length == 1 - - 'resource.resources.0.status.tags.0.tag == "8.5-437"' + namespace: "{{ test_ns }}" + name: ubi8-micro + register: resource + + - name: assert that redhat/ubi8-micro has only one tag + assert: + that: + - resource.resources | length == 1 + - resource.resources.0.status.tags | length == 1 + - 'resource.resources.0.status.tags.0.tag == "8.5-437"' always: - name: Delete testing namespace diff --git a/ansible_collections/community/okd/molecule/default/tasks/openshift_prune_images.yml b/ansible_collections/community/okd/molecule/default/tasks/openshift_prune_images.yml index 86630da69..1748522ec 100644 --- a/ansible_collections/community/okd/molecule/default/tasks/openshift_prune_images.yml +++ b/ansible_collections/community/okd/molecule/default/tasks/openshift_prune_images.yml @@ -38,12 +38,12 @@ name: "{{ pod_name }}" spec: containers: - - name: test-container - image: "{{ prune_registry }}/{{ prune_ns }}/{{ container.name }}:latest" - command: - - /bin/sh - - -c - - while true;do date;sleep 5; done + - name: test-container + image: "{{ prune_registry }}/{{ prune_ns }}/{{ container.name }}:latest" + command: + - /bin/sh + - -c + - while true;do date;sleep 5; done - name: Create limit range for images size community.okd.k8s: @@ -57,7 +57,7 @@ - type: openshift.io/Image max: storage: 1Gi - + - name: Prune images from namespace community.okd.openshift_adm_prune_images: registry_url: "{{ prune_registry }}" diff --git a/ansible_collections/community/okd/molecule/default/tasks/openshift_route.yml b/ansible_collections/community/okd/molecule/default/tasks/openshift_route.yml index 50056b7e4..b3043cb7e 100644 --- a/ansible_collections/community/okd/molecule/default/tasks/openshift_route.yml +++ b/ansible_collections/community/okd/molecule/default/tasks/openshift_route.yml @@ -19,10 +19,10 @@ app: hello-kubernetes spec: containers: - - name: hello-kubernetes - image: docker.io/openshift/hello-openshift - ports: - - containerPort: 8080 + - name: hello-kubernetes + image: docker.io/openshift/hello-openshift + ports: + - 
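The prune_images hunk above depends on registry_url pointing at the integration registry that actually serves the image layers; everything else follows the usual preview-then-prune flow. A minimal, hedged invocation (both values are placeholders, and the namespace option is assumed from the task shown above):

    - name: Prune unreferenced images pushed to the integration registry
      community.okd.openshift_adm_prune_images:
        registry_url: "{{ prune_registry }}"
        namespace: "{{ prune_ns }}"
      register: pruned_images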
containerPort: 8080 - name: Create Service community.okd.k8s: @@ -35,8 +35,8 @@ namespace: default spec: ports: - - port: 80 - targetPort: 8080 + - port: 80 + targetPort: 8080 selector: app: hello-kubernetes diff --git a/ansible_collections/community/okd/molecule/default/vars/main.yml b/ansible_collections/community/okd/molecule/default/vars/main.yml index 66fb0d33c..22e227c1f 100644 --- a/ansible_collections/community/okd/molecule/default/vars/main.yml +++ b/ansible_collections/community/okd/molecule/default/vars/main.yml @@ -64,14 +64,16 @@ okd_dc_triggers: okd_dc_spec: template: '{{ k8s_pod_template }}' - triggers: '{{ okd_dc_triggers }}' + selector: + matchLabels: + app: "{{ k8s_pod_name }}" replicas: 1 strategy: type: Recreate okd_dc_template: - apiVersion: v1 - kind: DeploymentConfig + apiVersion: apps/v1 + kind: Deployment spec: '{{ okd_dc_spec }}' okd_imagestream_template: @@ -83,12 +85,12 @@ okd_imagestream_template: lookupPolicy: local: true tags: - - annotations: null - from: - kind: DockerImage - name: '{{ image }}' - name: '{{ image_tag }}' - referencePolicy: - type: Source + - annotations: null + from: + kind: DockerImage + name: '{{ image }}' + name: '{{ image_tag }}' + referencePolicy: + type: Source image_tag: latest diff --git a/ansible_collections/community/okd/plugins/connection/oc.py b/ansible_collections/community/okd/plugins/connection/oc.py index 44236a11a..ae399e66b 100644 --- a/ansible_collections/community/okd/plugins/connection/oc.py +++ b/ansible_collections/community/okd/plugins/connection/oc.py @@ -17,10 +17,11 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = """ author: - xuxinkun (@xuxinkun) @@ -145,29 +146,32 @@ DOCUMENTATION = ''' env: - name: K8S_AUTH_VERIFY_SSL aliases: [ oc_verify_ssl ] -''' +""" -from ansible_collections.kubernetes.core.plugins.connection.kubectl import Connection as KubectlConnection +from ansible_collections.kubernetes.core.plugins.connection.kubectl import ( + Connection as KubectlConnection, +) -CONNECTION_TRANSPORT = 'oc' +CONNECTION_TRANSPORT = "oc" CONNECTION_OPTIONS = { - 'oc_container': '-c', - 'oc_namespace': '-n', - 'oc_kubeconfig': '--kubeconfig', - 'oc_context': '--context', - 'oc_host': '--server', - 'client_cert': '--client-certificate', - 'client_key': '--client-key', - 'ca_cert': '--certificate-authority', - 'validate_certs': '--insecure-skip-tls-verify', - 'oc_token': '--token' + "oc_container": "-c", + "oc_namespace": "-n", + "oc_kubeconfig": "--kubeconfig", + "oc_context": "--context", + "oc_host": "--server", + "client_cert": "--client-certificate", + "client_key": "--client-key", + "ca_cert": "--certificate-authority", + "validate_certs": "--insecure-skip-tls-verify", + "oc_token": "--token", } class Connection(KubectlConnection): - ''' Local oc based connections ''' + """Local oc based connections""" + transport = CONNECTION_TRANSPORT connection_options = CONNECTION_OPTIONS documentation = DOCUMENTATION diff --git a/ansible_collections/community/okd/plugins/inventory/openshift.py b/ansible_collections/community/okd/plugins/inventory/openshift.py index f69c652fc..4ec788408 100644 --- a/ansible_collections/community/okd/plugins/inventory/openshift.py +++ b/ansible_collections/community/okd/plugins/inventory/openshift.py @@ -1,11 +1,11 @@ # Copyright (c) 2018
Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = """ name: openshift author: - Chris Houseknecht (@chouseknecht) @@ -17,6 +17,13 @@ DOCUMENTATION = ''' - Groups by cluster name, namespace, namespace_services, namespace_pods, namespace_routes, and labels - Uses openshift.(yml|yaml) YAML configuration file to set parameter values. + deprecated: + removed_in: 4.0.0 + why: | + As discussed in U(https://github.com/ansible-collections/kubernetes.core/issues/31), we decided to + remove the openshift inventory plugin in release 4.0.0. + alternative: "Use M(kubernetes.core.k8s_info) and M(ansible.builtin.add_host) instead." + options: plugin: description: token that ensures this is a source file for the 'openshift' plugin. @@ -87,34 +94,41 @@ DOCUMENTATION = ''' - "python >= 3.6" - "kubernetes >= 12.0.0" - "PyYAML >= 3.11" -''' +""" -EXAMPLES = ''' +EXAMPLES = """ # File must be named openshift.yaml or openshift.yml -# Authenticate with token, and return all pods and services for all namespaces -plugin: community.okd.openshift -connections: - - host: https://192.168.64.4:8443 - api_key: xxxxxxxxxxxxxxxx - verify_ssl: false - -# Use default config (~/.kube/config) file and active context, and return objects for a specific namespace -plugin: community.okd.openshift -connections: - - namespaces: - - testing - -# Use a custom config file, and a specific context. -plugin: community.okd.openshift -connections: - - kubeconfig: /path/to/config - context: 'awx/192-168-64-4:8443/developer' -''' +- name: Authenticate with token, and return all pods and services for all namespaces + plugin: community.okd.openshift + connections: + - host: https://192.168.64.4:8443 + api_key: xxxxxxxxxxxxxxxx + verify_ssl: false + +- name: Use default config (~/.kube/config) file and active context, and return objects for a specific namespace + plugin: community.okd.openshift + connections: + - namespaces: + - testing + +- name: Use a custom config file, and a specific context. 
+ plugin: community.okd.openshift + connections: + - kubeconfig: /path/to/config + context: 'awx/192-168-64-4:8443/developer' +""" try: - from ansible_collections.kubernetes.core.plugins.inventory.k8s import K8sInventoryException, InventoryModule as K8sInventoryModule, format_dynamic_api_exc - from ansible_collections.kubernetes.core.plugins.module_utils.k8s.client import get_api_client + from ansible_collections.kubernetes.core.plugins.inventory.k8s import ( + K8sInventoryException, + InventoryModule as K8sInventoryModule, + format_dynamic_api_exc, + ) + from ansible_collections.kubernetes.core.plugins.module_utils.k8s.client import ( + get_api_client, + ) + HAS_KUBERNETES_COLLECTION = True except ImportError as e: HAS_KUBERNETES_COLLECTION = False @@ -127,29 +141,38 @@ except ImportError: class InventoryModule(K8sInventoryModule): - NAME = 'community.okd.openshift' + NAME = "community.okd.openshift" - connection_plugin = 'community.okd.oc' - transport = 'oc' + connection_plugin = "community.okd.oc" + transport = "oc" def check_kubernetes_collection(self): - if not HAS_KUBERNETES_COLLECTION: - K8sInventoryException("The kubernetes.core collection must be installed") + raise K8sInventoryException( + "The kubernetes.core collection must be installed" + ) def fetch_objects(self, connections): self.check_kubernetes_collection() super(InventoryModule, self).fetch_objects(connections) + self.display.deprecated( + "The 'openshift' inventory plugin has been deprecated and will be removed in release 4.0.0", + version="4.0.0", + collection_name="community.okd", + ) + if connections: if not isinstance(connections, list): raise K8sInventoryException("Expecting connections to be a list.") for connection in connections: client = get_api_client(**connection) - name = connection.get('name', self.get_default_host_name(client.configuration.host)) - if connection.get('namespaces'): - namespaces = connection['namespaces'] + name = connection.get( + "name", self.get_default_host_name(client.configuration.host) + ) + if connection.get("namespaces"): + namespaces = connection["namespaces"] else: namespaces = self.get_available_namespaces(client) for namespace in namespaces: @@ -163,15 +186,19 @@ class InventoryModule(K8sInventoryModule): def get_routes_for_namespace(self, client, name, namespace): self.check_kubernetes_collection() - v1_route = client.resources.get(api_version='route.openshift.io/v1', kind='Route') + v1_route = client.resources.get( + api_version="route.openshift.io/v1", kind="Route" + ) try: obj = v1_route.get(namespace=namespace) except DynamicApiError as exc: self.display.debug(exc) - raise K8sInventoryException('Error fetching Routes list: %s' % format_dynamic_api_exc(exc)) + raise K8sInventoryException( + "Error fetching Routes list: %s" % format_dynamic_api_exc(exc) + ) - namespace_group = 'namespace_{0}'.format(namespace) - namespace_routes_group = '{0}_routes'.format(namespace_group) + namespace_group = "namespace_{0}".format(namespace) + namespace_routes_group = "{0}_routes".format(namespace_group) self.inventory.add_group(name) self.inventory.add_group(namespace_group) @@ -180,14 +207,18 @@ class InventoryModule(K8sInventoryModule): self.inventory.add_child(namespace_group, namespace_routes_group) for route in obj.items: route_name = route.metadata.name - route_annotations = {} if not route.metadata.annotations else dict(route.metadata.annotations) + route_annotations = ( + {} + if not route.metadata.annotations + else dict(route.metadata.annotations) + ) 
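# A minimal sketch (not part of the plugin) of the grouping rule applied in
# the lines just below: every label key/value pair on a Route becomes a
# "label_<key>_<value>" inventory group containing that route. The helper
# name and the sample route dict are made up for illustration.
def _label_groups_sketch(routes):
    groups = {}
    for route in routes:
        route_name = route["metadata"]["name"]
        for key, value in route["metadata"].get("labels", {}).items():
            groups.setdefault("label_{0}_{1}".format(key, value), []).append(route_name)
    return groups

# Usage: _label_groups_sketch([{"metadata": {"name": "frontend", "labels":
# {"app": "hello"}}}]) returns {"label_app_hello": ["frontend"]}.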
self.inventory.add_host(route_name) if route.metadata.labels: # create a group for each label_value for key, value in route.metadata.labels: - group_name = 'label_{0}_{1}'.format(key, value) + group_name = "label_{0}_{1}".format(key, value) self.inventory.add_group(group_name) self.inventory.add_child(group_name, route_name) route_labels = dict(route.metadata.labels) @@ -197,19 +228,25 @@ class InventoryModule(K8sInventoryModule): self.inventory.add_child(namespace_routes_group, route_name) # add hostvars - self.inventory.set_variable(route_name, 'labels', route_labels) - self.inventory.set_variable(route_name, 'annotations', route_annotations) - self.inventory.set_variable(route_name, 'cluster_name', route.metadata.clusterName) - self.inventory.set_variable(route_name, 'object_type', 'route') - self.inventory.set_variable(route_name, 'self_link', route.metadata.selfLink) - self.inventory.set_variable(route_name, 'resource_version', route.metadata.resourceVersion) - self.inventory.set_variable(route_name, 'uid', route.metadata.uid) + self.inventory.set_variable(route_name, "labels", route_labels) + self.inventory.set_variable(route_name, "annotations", route_annotations) + self.inventory.set_variable( + route_name, "cluster_name", route.metadata.clusterName + ) + self.inventory.set_variable(route_name, "object_type", "route") + self.inventory.set_variable( + route_name, "self_link", route.metadata.selfLink + ) + self.inventory.set_variable( + route_name, "resource_version", route.metadata.resourceVersion + ) + self.inventory.set_variable(route_name, "uid", route.metadata.uid) if route.spec.host: - self.inventory.set_variable(route_name, 'host', route.spec.host) + self.inventory.set_variable(route_name, "host", route.spec.host) if route.spec.path: - self.inventory.set_variable(route_name, 'path', route.spec.path) + self.inventory.set_variable(route_name, "path", route.spec.path) - if hasattr(route.spec.port, 'targetPort') and route.spec.port.targetPort: - self.inventory.set_variable(route_name, 'port', dict(route.spec.port)) + if hasattr(route.spec.port, "targetPort") and route.spec.port.targetPort: + self.inventory.set_variable(route_name, "port", dict(route.spec.port)) diff --git a/ansible_collections/community/okd/plugins/module_utils/k8s.py b/ansible_collections/community/okd/plugins/module_utils/k8s.py index 87ec70d90..9c8f47fd2 100644 --- a/ansible_collections/community/okd/plugins/module_utils/k8s.py +++ b/ansible_collections/community/okd/plugins/module_utils/k8s.py @@ -1,35 +1,46 @@ #!/usr/bin/env python -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type import re import operator from functools import reduce -from ansible_collections.community.okd.plugins.module_utils.openshift_common import AnsibleOpenshiftModule +from ansible_collections.community.okd.plugins.module_utils.openshift_common import ( + AnsibleOpenshiftModule, +) try: - from ansible_collections.kubernetes.core.plugins.module_utils.k8s.resource import create_definitions - from ansible_collections.kubernetes.core.plugins.module_utils.k8s.exceptions import CoreException + from ansible_collections.kubernetes.core.plugins.module_utils.k8s.resource import ( + create_definitions, + ) + from ansible_collections.kubernetes.core.plugins.module_utils.k8s.exceptions import ( + CoreException, + ) except ImportError: pass from ansible.module_utils._text import to_native try: - from kubernetes.dynamic.exceptions import 
DynamicApiError, NotFoundError, ForbiddenError + from kubernetes.dynamic.exceptions import ( + DynamicApiError, + NotFoundError, + ForbiddenError, + ) except ImportError as e: pass -TRIGGER_ANNOTATION = 'image.openshift.io/triggers' -TRIGGER_CONTAINER = re.compile(r"(?P<path>.*)\[((?P<index>[0-9]+)|\?\(@\.name==[\"'\\]*(?P<name>[a-z0-9]([-a-z0-9]*[a-z0-9])?))") +TRIGGER_ANNOTATION = "image.openshift.io/triggers" +TRIGGER_CONTAINER = re.compile( + r"(?P<path>.*)\[((?P<index>[0-9]+)|\?\(@\.name==[\"'\\]*(?P<name>[a-z0-9]([-a-z0-9]*[a-z0-9])?))" +) class OKDRawModule(AnsibleOpenshiftModule): - def __init__(self, **kwargs): - super(OKDRawModule, self).__init__(**kwargs) @property @@ -50,36 +61,60 @@ class OKDRawModule(AnsibleOpenshiftModule): result = {"changed": False, "result": {}} warnings = [] - if self.params.get("state") != 'absent': + if self.params.get("state") != "absent": existing = None name = definition.get("metadata", {}).get("name") namespace = definition.get("metadata", {}).get("namespace") - if definition.get("kind") in ['Project', 'ProjectRequest']: + if definition.get("kind") in ["Project", "ProjectRequest"]: try: - resource = self.svc.find_resource(kind=definition.get("kind"), api_version=definition.get("apiVersion", "v1")) - existing = resource.get(name=name, namespace=namespace).to_dict() + resource = self.svc.find_resource( + kind=definition.get("kind"), + api_version=definition.get("apiVersion", "v1"), + ) + existing = resource.get( + name=name, namespace=namespace + ).to_dict() except (NotFoundError, ForbiddenError): result = self.create_project_request(definition) changed |= result["changed"] results.append(result) continue except DynamicApiError as exc: - self.fail_json(msg='Failed to retrieve requested object: {0}'.format(exc.body), - error=exc.status, status=exc.status, reason=exc.reason) - - if definition.get("kind") not in ['Project', 'ProjectRequest']: + self.fail_json( + msg="Failed to retrieve requested object: {0}".format( + exc.body + ), + error=exc.status, + status=exc.status, + reason=exc.reason, + ) + + if definition.get("kind") not in ["Project", "ProjectRequest"]: try: - resource = self.svc.find_resource(kind=definition.get("kind"), api_version=definition.get("apiVersion", "v1")) - existing = resource.get(name=name, namespace=namespace).to_dict() + resource = self.svc.find_resource( + kind=definition.get("kind"), + api_version=definition.get("apiVersion", "v1"), + ) + existing = resource.get( + name=name, namespace=namespace + ).to_dict() except Exception: existing = None if existing: - if resource.kind == 'DeploymentConfig': - if definition.get('spec', {}).get('triggers'): - definition = self.resolve_imagestream_triggers(existing, definition) - elif existing['metadata'].get('annotations', {}).get(TRIGGER_ANNOTATION): - definition = self.resolve_imagestream_trigger_annotation(existing, definition) + if resource.kind == "DeploymentConfig": + if definition.get("spec", {}).get("triggers"): + definition = self.resolve_imagestream_triggers( + existing, definition + ) + elif ( + existing["metadata"] + .get("annotations", {}) + .get(TRIGGER_ANNOTATION) + ): + definition = self.resolve_imagestream_trigger_annotation( + existing, definition + ) if self.params.get("validate") is not None: warnings = self.validate(definition) @@ -116,13 +151,15 @@ class OKDRawModule(AnsibleOpenshiftModule): @staticmethod def get_index(desired, objects, keys): - """ Iterates over keys, returns the first object from objects where the value of the key + matches the value in desired """ # pylint: disable=use-a-generator # Use a generator instead 'all(desired.get(key, True) == item.get(key, False) for key in keys)' for i, item in enumerate(objects): - if item and all([desired.get(key, True) == item.get(key, False) for key in keys]): + if item and all( + [desired.get(key, True) == item.get(key, False) for key in keys] + ): return i def resolve_imagestream_trigger_annotation(self, existing, definition): @@ -137,84 +174,148 @@ class OKDRawModule(AnsibleOpenshiftModule): def set_from_fields(d, fields, value): get_from_fields(d, fields[:-1])[fields[-1]] = value - if TRIGGER_ANNOTATION in definition['metadata'].get('annotations', {}).keys(): - triggers = yaml.safe_load(definition['metadata']['annotations'][TRIGGER_ANNOTATION] or '[]') + if TRIGGER_ANNOTATION in definition["metadata"].get("annotations", {}).keys(): + triggers = yaml.safe_load( + definition["metadata"]["annotations"][TRIGGER_ANNOTATION] or "[]" + ) else: - triggers = yaml.safe_load(existing['metadata'].get('annotations', '{}').get(TRIGGER_ANNOTATION, '[]')) + triggers = yaml.safe_load( + existing["metadata"] + .get("annotations", {}) + .get(TRIGGER_ANNOTATION, "[]") + ) if not isinstance(triggers, list): return definition for trigger in triggers: - if trigger.get('fieldPath'): - parsed = self.parse_trigger_fieldpath(trigger['fieldPath']) - path = parsed.get('path', '').split('.') + if trigger.get("fieldPath"): + parsed = self.parse_trigger_fieldpath(trigger["fieldPath"]) + path = parsed.get("path", "").split(".") if path: existing_containers = get_from_fields(existing, path) new_containers = get_from_fields(definition, path) - if parsed.get('name'): - existing_index = self.get_index({'name': parsed['name']}, existing_containers, ['name']) - new_index = self.get_index({'name': parsed['name']}, new_containers, ['name']) - elif parsed.get('index') is not None: - existing_index = new_index = int(parsed['index']) + if parsed.get("name"): + existing_index = self.get_index( + {"name": parsed["name"]}, existing_containers, ["name"] + ) + new_index = self.get_index( + {"name": parsed["name"]}, new_containers, ["name"] + ) + elif parsed.get("index") is not None: + existing_index = new_index = int(parsed["index"]) else: existing_index = new_index = None if existing_index is not None and new_index is not None: - if existing_index < len(existing_containers) and new_index < len(new_containers): - set_from_fields(definition, path + [new_index, 'image'], get_from_fields(existing, path + [existing_index, 'image'])) + if existing_index < len( + existing_containers + ) and new_index < len(new_containers): + set_from_fields( + definition, + path + [new_index, "image"], + get_from_fields( + existing, path + [existing_index, "image"] + ), + ) return definition def resolve_imagestream_triggers(self, existing, definition): - - existing_triggers = existing.get('spec', {}).get('triggers') - new_triggers = definition['spec']['triggers'] - existing_containers = existing.get('spec', {}).get('template', {}).get('spec', {}).get('containers', []) - new_containers = definition.get('spec', {}).get('template', {}).get('spec', {}).get('containers', []) + existing_triggers = existing.get("spec", {}).get("triggers") + new_triggers = definition["spec"]["triggers"] + existing_containers = ( + existing.get("spec", {}) + .get("template", {}) + .get("spec", {}) + .get("containers", []) + ) + new_containers = ( + definition.get("spec", {}) + .get("template", {})
.get("spec", {}) + .get("containers", []) + ) for i, trigger in enumerate(new_triggers): - if trigger.get('type') == 'ImageChange' and trigger.get('imageChangeParams'): - names = trigger['imageChangeParams'].get('containerNames', []) + if trigger.get("type") == "ImageChange" and trigger.get( + "imageChangeParams" + ): + names = trigger["imageChangeParams"].get("containerNames", []) for name in names: - old_container_index = self.get_index({'name': name}, existing_containers, ['name']) - new_container_index = self.get_index({'name': name}, new_containers, ['name']) - if old_container_index is not None and new_container_index is not None: - image = existing['spec']['template']['spec']['containers'][old_container_index]['image'] - definition['spec']['template']['spec']['containers'][new_container_index]['image'] = image - - existing_index = self.get_index(trigger['imageChangeParams'], - [x.get('imageChangeParams') for x in existing_triggers], - ['containerNames']) + old_container_index = self.get_index( + {"name": name}, existing_containers, ["name"] + ) + new_container_index = self.get_index( + {"name": name}, new_containers, ["name"] + ) + if ( + old_container_index is not None + and new_container_index is not None + ): + image = existing["spec"]["template"]["spec"]["containers"][ + old_container_index + ]["image"] + definition["spec"]["template"]["spec"]["containers"][ + new_container_index + ]["image"] = image + + existing_index = self.get_index( + trigger["imageChangeParams"], + [x.get("imageChangeParams") for x in existing_triggers], + ["containerNames"], + ) if existing_index is not None: - existing_image = existing_triggers[existing_index].get('imageChangeParams', {}).get('lastTriggeredImage') + existing_image = ( + existing_triggers[existing_index] + .get("imageChangeParams", {}) + .get("lastTriggeredImage") + ) if existing_image: - definition['spec']['triggers'][i]['imageChangeParams']['lastTriggeredImage'] = existing_image - existing_from = existing_triggers[existing_index].get('imageChangeParams', {}).get('from', {}) - new_from = trigger['imageChangeParams'].get('from', {}) - existing_namespace = existing_from.get('namespace') - existing_name = existing_from.get('name', False) - new_name = new_from.get('name', True) - add_namespace = existing_namespace and 'namespace' not in new_from.keys() and existing_name == new_name + definition["spec"]["triggers"][i]["imageChangeParams"][ + "lastTriggeredImage" + ] = existing_image + existing_from = ( + existing_triggers[existing_index] + .get("imageChangeParams", {}) + .get("from", {}) + ) + new_from = trigger["imageChangeParams"].get("from", {}) + existing_namespace = existing_from.get("namespace") + existing_name = existing_from.get("name", False) + new_name = new_from.get("name", True) + add_namespace = ( + existing_namespace + and "namespace" not in new_from.keys() + and existing_name == new_name + ) if add_namespace: - definition['spec']['triggers'][i]['imageChangeParams']['from']['namespace'] = existing_from['namespace'] + definition["spec"]["triggers"][i]["imageChangeParams"][ + "from" + ]["namespace"] = existing_from["namespace"] return definition def parse_trigger_fieldpath(self, expression): parsed = TRIGGER_CONTAINER.search(expression).groupdict() - if parsed.get('index'): - parsed['index'] = int(parsed['index']) + if parsed.get("index"): + parsed["index"] = int(parsed["index"]) return parsed def create_project_request(self, definition): - definition['kind'] = 'ProjectRequest' - result = {'changed': False, 'result': {}} - resource 
= self.svc.find_resource(kind='ProjectRequest', api_version=definition['apiVersion'], fail=True) + definition["kind"] = "ProjectRequest" + result = {"changed": False, "result": {}} + resource = self.svc.find_resource( + kind="ProjectRequest", api_version=definition["apiVersion"], fail=True + ) if not self.check_mode: try: k8s_obj = resource.create(definition) - result['result'] = k8s_obj.to_dict() + result["result"] = k8s_obj.to_dict() except DynamicApiError as exc: - self.fail_json(msg="Failed to create object: {0}".format(exc.body), - error=exc.status, status=exc.status, reason=exc.reason) - result['changed'] = True - result['method'] = 'create' + self.fail_json( + msg="Failed to create object: {0}".format(exc.body), + error=exc.status, + status=exc.status, + reason=exc.reason, + ) + result["changed"] = True + result["method"] = "create" return result diff --git a/ansible_collections/community/okd/plugins/module_utils/openshift_adm_prune_auth.py b/ansible_collections/community/okd/plugins/module_utils/openshift_adm_prune_auth.py index e5143ae4e..f28640551 100644 --- a/ansible_collections/community/okd/plugins/module_utils/openshift_adm_prune_auth.py +++ b/ansible_collections/community/okd/plugins/module_utils/openshift_adm_prune_auth.py @@ -1,11 +1,14 @@ #!/usr/bin/env python -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type from ansible.module_utils._text import to_native -from ansible_collections.community.okd.plugins.module_utils.openshift_common import AnsibleOpenshiftModule +from ansible_collections.community.okd.plugins.module_utils.openshift_common import ( + AnsibleOpenshiftModule, +) try: from kubernetes import client @@ -18,31 +21,36 @@ class OpenShiftAdmPruneAuth(AnsibleOpenshiftModule): def __init__(self, **kwargs): super(OpenShiftAdmPruneAuth, self).__init__(**kwargs) - def prune_resource_binding(self, kind, api_version, ref_kind, ref_namespace_names, propagation_policy=None): - + def prune_resource_binding( + self, kind, api_version, ref_kind, ref_namespace_names, propagation_policy=None + ): resource = self.find_resource(kind=kind, api_version=api_version, fail=True) candidates = [] for ref_namespace, ref_name in ref_namespace_names: try: result = resource.get(name=None, namespace=ref_namespace) result = result.to_dict() - result = result.get('items') if 'items' in result else [result] + result = result.get("items") if "items" in result else [result] for obj in result: - namespace = obj['metadata'].get('namespace', None) - name = obj['metadata'].get('name') - if ref_kind and obj['roleRef']['kind'] != ref_kind: + namespace = obj["metadata"].get("namespace", None) + name = obj["metadata"].get("name") + if ref_kind and obj["roleRef"]["kind"] != ref_kind: # skip this binding as the roleRef.kind does not match continue - if obj['roleRef']['name'] == ref_name: + if obj["roleRef"]["name"] == ref_name: # select this binding as the roleRef.name match candidates.append((namespace, name)) except NotFoundError: continue except DynamicApiError as exc: - msg = "Failed to get {kind} resource due to: {msg}".format(kind=kind, msg=exc.body) + msg = "Failed to get {kind} resource due to: {msg}".format( + kind=kind, msg=exc.body + ) self.fail_json(msg=msg) except Exception as e: - msg = "Failed to get {kind} due to: {msg}".format(kind=kind, msg=to_native(e)) + msg = "Failed to get {kind} due to: {msg}".format( + kind=kind, msg=to_native(e) + ) self.fail_json(msg=msg) if len(candidates) 
== 0 or self.check_mode: @@ -54,24 +62,29 @@ class OpenShiftAdmPruneAuth(AnsibleOpenshiftModule): for namespace, name in candidates: try: - result = resource.delete(name=name, namespace=namespace, body=delete_options) + result = resource.delete( + name=name, namespace=namespace, body=delete_options + ) except DynamicApiError as exc: - msg = "Failed to delete {kind} {namespace}/{name} due to: {msg}".format(kind=kind, namespace=namespace, name=name, msg=exc.body) + msg = "Failed to delete {kind} {namespace}/{name} due to: {msg}".format( + kind=kind, namespace=namespace, name=name, msg=exc.body + ) self.fail_json(msg=msg) except Exception as e: - msg = "Failed to delete {kind} {namespace}/{name} due to: {msg}".format(kind=kind, namespace=namespace, name=name, msg=to_native(e)) + msg = "Failed to delete {kind} {namespace}/{name} due to: {msg}".format( + kind=kind, namespace=namespace, name=name, msg=to_native(e) + ) self.fail_json(msg=msg) return [y if x is None else x + "/" + y for x, y in candidates] def update_resource_binding(self, ref_kind, ref_names, namespaced=False): - - kind = 'ClusterRoleBinding' - api_version = "rbac.authorization.k8s.io/v1", + kind = "ClusterRoleBinding" + api_version = "rbac.authorization.k8s.io/v1" if namespaced: kind = "RoleBinding" resource = self.find_resource(kind=kind, api_version=api_version, fail=True) result = resource.get(name=None, namespace=None).to_dict() - result = result.get('items') if 'items' in result else [result] + result = result.get("items") if "items" in result else [result] if len(result) == 0: return [], False @@ -79,29 +92,40 @@ class OpenShiftAdmPruneAuth(AnsibleOpenshiftModule): def _update_user_group(binding_namespace, subjects): users, groups = [], [] for x in subjects: - if x['kind'] == 'User': - users.append(x['name']) - elif x['kind'] == 'Group': - groups.append(x['name']) - elif x['kind'] == 'ServiceAccount': + if x["kind"] == "User": + users.append(x["name"]) + elif x["kind"] == "Group": + groups.append(x["name"]) + elif x["kind"] == "ServiceAccount": namespace = binding_namespace - if x.get('namespace') is not None: - namespace = x.get('namespace') + if x.get("namespace") is not None: + namespace = x.get("namespace") if namespace is not None: - users.append("system:serviceaccount:%s:%s" % (namespace, x['name'])) + users.append( + "system:serviceaccount:%s:%s" % (namespace, x["name"]) + ) return users, groups candidates = [] changed = False for item in result: - subjects = item.get('subjects', []) - retainedSubjects = [x for x in subjects if x['kind'] == ref_kind and x['name'] in ref_names] + subjects = item.get("subjects", []) + retainedSubjects = [ + x for x in subjects if x["kind"] == ref_kind and x["name"] in ref_names + ] if len(subjects) != len(retainedSubjects): updated_binding = item - updated_binding['subjects'] = retainedSubjects - binding_namespace = item['metadata'].get('namespace', None) - updated_binding['userNames'], updated_binding['groupNames'] = _update_user_group(binding_namespace, retainedSubjects) - candidates.append(binding_namespace + "/" + item['metadata']['name'] if binding_namespace else item['metadata']['name']) + updated_binding["subjects"] = retainedSubjects + binding_namespace = item["metadata"].get("namespace", None) + ( + updated_binding["userNames"], + updated_binding["groupNames"], + ) = _update_user_group(binding_namespace, retainedSubjects) + candidates.append( + binding_namespace + "/" + item["metadata"]["name"] + if binding_namespace + else item["metadata"]["name"] + ) changed = True if not 
self.check_mode: try: @@ -112,20 +136,25 @@ class OpenShiftAdmPruneAuth(AnsibleOpenshiftModule): return candidates, changed def update_security_context(self, ref_names, key): - params = {'kind': 'SecurityContextConstraints', 'api_version': 'security.openshift.io/v1'} + params = { + "kind": "SecurityContextConstraints", + "api_version": "security.openshift.io/v1", + } sccs = self.kubernetes_facts(**params) - if not sccs['api_found']: - self.fail_json(msg=sccs['msg']) - sccs = sccs.get('resources') + if not sccs["api_found"]: + self.fail_json(msg=sccs["msg"]) + sccs = sccs.get("resources") candidates = [] changed = False - resource = self.find_resource(kind="SecurityContextConstraints", api_version="security.openshift.io/v1") + resource = self.find_resource( + kind="SecurityContextConstraints", api_version="security.openshift.io/v1" + ) for item in sccs: subjects = item.get(key, []) retainedSubjects = [x for x in subjects if x not in ref_names] if len(subjects) != len(retainedSubjects): - candidates.append(item['metadata']['name']) + candidates.append(item["metadata"]["name"]) changed = True if not self.check_mode: upd_sec_ctx = item @@ -138,94 +167,116 @@ class OpenShiftAdmPruneAuth(AnsibleOpenshiftModule): return candidates, changed def auth_prune_roles(self): - params = {'kind': 'Role', 'api_version': 'rbac.authorization.k8s.io/v1', 'namespace': self.params.get('namespace')} - for attr in ('name', 'label_selectors'): + params = { + "kind": "Role", + "api_version": "rbac.authorization.k8s.io/v1", + "namespace": self.params.get("namespace"), + } + for attr in ("name", "label_selectors"): if self.params.get(attr): params[attr] = self.params.get(attr) result = self.kubernetes_facts(**params) - if not result['api_found']: - self.fail_json(msg=result['msg']) + if not result["api_found"]: + self.fail_json(msg=result["msg"]) - roles = result.get('resources') + roles = result.get("resources") if len(roles) == 0: - self.exit_json(changed=False, msg="No candidate rolebinding to prune from namespace %s." % self.params.get('namespace')) - - ref_roles = [(x['metadata']['namespace'], x['metadata']['name']) for x in roles] - candidates = self.prune_resource_binding(kind="RoleBinding", - api_version="rbac.authorization.k8s.io/v1", - ref_kind="Role", - ref_namespace_names=ref_roles, - propagation_policy='Foreground') + self.exit_json( + changed=False, + msg="No candidate rolebinding to prune from namespace %s." 
+ % self.params.get("namespace"), + ) + + ref_roles = [(x["metadata"]["namespace"], x["metadata"]["name"]) for x in roles] + candidates = self.prune_resource_binding( + kind="RoleBinding", + api_version="rbac.authorization.k8s.io/v1", + ref_kind="Role", + ref_namespace_names=ref_roles, + propagation_policy="Foreground", + ) if len(candidates) == 0: self.exit_json(changed=False, role_binding=candidates) self.exit_json(changed=True, role_binding=candidates) def auth_prune_clusterroles(self): - params = {'kind': 'ClusterRole', 'api_version': 'rbac.authorization.k8s.io/v1'} - for attr in ('name', 'label_selectors'): + params = {"kind": "ClusterRole", "api_version": "rbac.authorization.k8s.io/v1"} + for attr in ("name", "label_selectors"): if self.params.get(attr): params[attr] = self.params.get(attr) result = self.kubernetes_facts(**params) - if not result['api_found']: - self.fail_json(msg=result['msg']) + if not result["api_found"]: + self.fail_json(msg=result["msg"]) - clusterroles = result.get('resources') + clusterroles = result.get("resources") if len(clusterroles) == 0: - self.exit_json(changed=False, msg="No clusterroles found matching input criteria.") + self.exit_json( + changed=False, msg="No clusterroles found matching input criteria." + ) - ref_clusterroles = [(None, x['metadata']['name']) for x in clusterroles] + ref_clusterroles = [(None, x["metadata"]["name"]) for x in clusterroles] # Prune ClusterRoleBinding - candidates_cluster_binding = self.prune_resource_binding(kind="ClusterRoleBinding", - api_version="rbac.authorization.k8s.io/v1", - ref_kind=None, - ref_namespace_names=ref_clusterroles) + candidates_cluster_binding = self.prune_resource_binding( + kind="ClusterRoleBinding", + api_version="rbac.authorization.k8s.io/v1", + ref_kind=None, + ref_namespace_names=ref_clusterroles, + ) # Prune Role Binding - candidates_namespaced_binding = self.prune_resource_binding(kind="RoleBinding", - api_version="rbac.authorization.k8s.io/v1", - ref_kind='ClusterRole', - ref_namespace_names=ref_clusterroles) - - self.exit_json(changed=True, - cluster_role_binding=candidates_cluster_binding, - role_binding=candidates_namespaced_binding) + candidates_namespaced_binding = self.prune_resource_binding( + kind="RoleBinding", + api_version="rbac.authorization.k8s.io/v1", + ref_kind="ClusterRole", + ref_namespace_names=ref_clusterroles, + ) + + self.exit_json( + changed=True, + cluster_role_binding=candidates_cluster_binding, + role_binding=candidates_namespaced_binding, + ) def list_groups(self, params=None): - options = {'kind': 'Group', 'api_version': 'user.openshift.io/v1'} + options = {"kind": "Group", "api_version": "user.openshift.io/v1"} if params: - for attr in ('name', 'label_selectors'): + for attr in ("name", "label_selectors"): if params.get(attr): options[attr] = params.get(attr) return self.kubernetes_facts(**options) def auth_prune_users(self): - params = {'kind': 'User', 'api_version': 'user.openshift.io/v1'} - for attr in ('name', 'label_selectors'): + params = {"kind": "User", "api_version": "user.openshift.io/v1"} + for attr in ("name", "label_selectors"): if self.params.get(attr): params[attr] = self.params.get(attr) users = self.kubernetes_facts(**params) if len(users) == 0: - self.exit_json(changed=False, msg="No resource type 'User' found matching input criteria.") + self.exit_json( + changed=False, + msg="No resource type 'User' found matching input criteria.", + ) - names = [x['metadata']['name'] for x in users] + names = [x["metadata"]["name"] for x in users] changed = 
False # Remove the user role binding - rolebinding, changed_role = self.update_resource_binding(ref_kind="User", - ref_names=names, - namespaced=True) + rolebinding, changed_role = self.update_resource_binding( + ref_kind="User", ref_names=names, namespaced=True + ) changed = changed or changed_role # Remove the user cluster role binding - clusterrolesbinding, changed_cr = self.update_resource_binding(ref_kind="User", - ref_names=names) + clusterrolesbinding, changed_cr = self.update_resource_binding( + ref_kind="User", ref_names=names + ) changed = changed or changed_cr # Remove the user from security context constraints - sccs, changed_sccs = self.update_security_context(names, 'users') + sccs, changed_sccs = self.update_security_context(names, "users") changed = changed or changed_sccs # Remove the user from groups @@ -233,14 +284,14 @@ class OpenShiftAdmPruneAuth(AnsibleOpenshiftModule): deleted_groups = [] resource = self.find_resource(kind="Group", api_version="user.openshift.io/v1") for grp in groups: - subjects = grp.get('users', []) + subjects = grp.get("users", []) retainedSubjects = [x for x in subjects if x not in names] if len(subjects) != len(retainedSubjects): - deleted_groups.append(grp['metadata']['name']) + deleted_groups.append(grp["metadata"]["name"]) changed = True if not self.check_mode: upd_group = grp - upd_group.update({'users': retainedSubjects}) + upd_group.update({"users": retainedSubjects}) try: resource.apply(upd_group, namespace=None) except DynamicApiError as exc: @@ -248,62 +299,82 @@ class OpenShiftAdmPruneAuth(AnsibleOpenshiftModule): self.fail_json(msg=msg) # Remove the user's OAuthClientAuthorizations - oauth = self.kubernetes_facts(kind='OAuthClientAuthorization', api_version='oauth.openshift.io/v1') + oauth = self.kubernetes_facts( + kind="OAuthClientAuthorization", api_version="oauth.openshift.io/v1" + ) deleted_auths = [] - resource = self.find_resource(kind="OAuthClientAuthorization", api_version="oauth.openshift.io/v1") + resource = self.find_resource( + kind="OAuthClientAuthorization", api_version="oauth.openshift.io/v1" + ) for authorization in oauth: - if authorization.get('userName', None) in names: - auth_name = authorization['metadata']['name'] + if authorization.get("userName", None) in names: + auth_name = authorization["metadata"]["name"] deleted_auths.append(auth_name) changed = True if not self.check_mode: try: - resource.delete(name=auth_name, namespace=None, body=client.V1DeleteOptions()) + resource.delete( + name=auth_name, + namespace=None, + body=client.V1DeleteOptions(), + ) except DynamicApiError as exc: - msg = "Failed to delete OAuthClientAuthorization {name} due to: {msg}".format(name=auth_name, msg=exc.body) + msg = "Failed to delete OAuthClientAuthorization {name} due to: {msg}".format( + name=auth_name, msg=exc.body + ) self.fail_json(msg=msg) except Exception as e: - msg = "Failed to delete OAuthClientAuthorization {name} due to: {msg}".format(name=auth_name, msg=to_native(e)) + msg = "Failed to delete OAuthClientAuthorization {name} due to: {msg}".format( + name=auth_name, msg=to_native(e) + ) self.fail_json(msg=msg) - self.exit_json(changed=changed, - cluster_role_binding=clusterrolesbinding, - role_binding=rolebinding, - security_context_constraints=sccs, - authorization=deleted_auths, - group=deleted_groups) + self.exit_json( + changed=changed, + cluster_role_binding=clusterrolesbinding, + role_binding=rolebinding, + security_context_constraints=sccs, + authorization=deleted_auths, + group=deleted_groups, + ) def 
auth_prune_groups(self): groups = self.list_groups(params=self.params) if len(groups) == 0: - self.exit_json(changed=False, result="No resource type 'Group' found matching input criteria.") + self.exit_json( + changed=False, + result="No resource type 'Group' found matching input criteria.", + ) - names = [x['metadata']['name'] for x in groups] + names = [x["metadata"]["name"] for x in groups] changed = False # Remove the groups role binding - rolebinding, changed_role = self.update_resource_binding(ref_kind="Group", - ref_names=names, - namespaced=True) + rolebinding, changed_role = self.update_resource_binding( + ref_kind="Group", ref_names=names, namespaced=True + ) changed = changed or changed_role # Remove the groups cluster role binding - clusterrolesbinding, changed_cr = self.update_resource_binding(ref_kind="Group", - ref_names=names) + clusterrolesbinding, changed_cr = self.update_resource_binding( + ref_kind="Group", ref_names=names + ) changed = changed or changed_cr # Remove the groups security context constraints - sccs, changed_sccs = self.update_security_context(names, 'groups') + sccs, changed_sccs = self.update_security_context(names, "groups") changed = changed or changed_sccs - self.exit_json(changed=changed, - cluster_role_binding=clusterrolesbinding, - role_binding=rolebinding, - security_context_constraints=sccs) + self.exit_json( + changed=changed, + cluster_role_binding=clusterrolesbinding, + role_binding=rolebinding, + security_context_constraints=sccs, + ) def execute_module(self): auth_prune = { - 'roles': self.auth_prune_roles, - 'clusterroles': self.auth_prune_clusterroles, - 'users': self.auth_prune_users, - 'groups': self.auth_prune_groups, + "roles": self.auth_prune_roles, + "clusterroles": self.auth_prune_clusterroles, + "users": self.auth_prune_users, + "groups": self.auth_prune_groups, } - auth_prune[self.params.get('resource')]() + auth_prune[self.params.get("resource")]() diff --git a/ansible_collections/community/okd/plugins/module_utils/openshift_adm_prune_deployments.py b/ansible_collections/community/okd/plugins/module_utils/openshift_adm_prune_deployments.py index 418922d52..315de4b10 100644 --- a/ansible_collections/community/okd/plugins/module_utils/openshift_adm_prune_deployments.py +++ b/ansible_collections/community/okd/plugins/module_utils/openshift_adm_prune_deployments.py @@ -1,14 +1,16 @@ #!/usr/bin/env python -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type from datetime import datetime, timezone -import traceback from ansible.module_utils._text import to_native -from ansible_collections.community.okd.plugins.module_utils.openshift_common import AnsibleOpenshiftModule +from ansible_collections.community.okd.plugins.module_utils.openshift_common import ( + AnsibleOpenshiftModule, +) try: from kubernetes import client @@ -23,7 +25,9 @@ def get_deploymentconfig_for_replicationcontroller(replica_controller): # This is set on replication controller pod template by deployer controller. 
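# A worked example of the annotation contract relied on below (values made
# up for illustration): for
#   rc = {"metadata": {"annotations": {"openshift.io/deployment-config.name": "frontend"}}}
# the helper returns "frontend"; for an object missing the annotation (or
# missing metadata entirely) the lookup falls through and None is returned.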
DeploymentConfigAnnotation = "openshift.io/deployment-config.name" try: - deploymentconfig_name = replica_controller['metadata']['annotations'].get(DeploymentConfigAnnotation) + deploymentconfig_name = replica_controller["metadata"]["annotations"].get( + DeploymentConfigAnnotation + ) if deploymentconfig_name is None or deploymentconfig_name == "": return None return deploymentconfig_name @@ -32,7 +36,6 @@ class OpenShiftAdmPruneDeployment(AnsibleOpenshiftModule): - def __init__(self, **kwargs): super(OpenShiftAdmPruneDeployment, self).__init__(**kwargs) @@ -41,27 +44,33 @@ class OpenShiftAdmPruneDeployment(AnsibleOpenshiftModule): return get_deploymentconfig_for_replicationcontroller(obj) is not None def _zeroReplicaSize(obj): - return obj['spec']['replicas'] == 0 and obj['status']['replicas'] == 0 + return obj["spec"]["replicas"] == 0 and obj["status"]["replicas"] == 0 def _complete_failed(obj): DeploymentStatusAnnotation = "openshift.io/deployment.phase" try: # validate that replication controller status is either 'Complete' or 'Failed' - deployment_phase = obj['metadata']['annotations'].get(DeploymentStatusAnnotation) - return deployment_phase in ('Failed', 'Complete') + deployment_phase = obj["metadata"]["annotations"].get( + DeploymentStatusAnnotation + ) + return deployment_phase in ("Failed", "Complete") except Exception: return False def _younger(obj): - creation_timestamp = datetime.strptime(obj['metadata']['creationTimestamp'], '%Y-%m-%dT%H:%M:%SZ') + creation_timestamp = datetime.strptime( + obj["metadata"]["creationTimestamp"], "%Y-%m-%dT%H:%M:%SZ" + ) now = datetime.now(timezone.utc).replace(tzinfo=None) age = (now - creation_timestamp).seconds / 60 - return age > self.params['keep_younger_than'] + return age > self.params["keep_younger_than"] def _orphan(obj): try: # verify if the deploymentconfig associated to the replication controller is still existing - deploymentconfig_name = get_deploymentconfig_for_replicationcontroller(obj) + deploymentconfig_name = get_deploymentconfig_for_replicationcontroller( + obj + ) params = dict( kind="DeploymentConfig", api_version="apps.openshift.io/v1", @@ -69,14 +78,14 @@ class OpenShiftAdmPruneDeployment(AnsibleOpenshiftModule): namespace=obj["metadata"]["name"], ) exists = self.kubernetes_facts(**params) - return not (exists.get['api_found'] and len(exists['resources']) > 0) + return not (exists.get("api_found") and len(exists["resources"]) > 0) except Exception: return False predicates = [_deployment, _zeroReplicaSize, _complete_failed] - if self.params['orphans']: + if self.params["orphans"]: predicates.append(_orphan) - if self.params['keep_younger_than']: + if self.params["keep_younger_than"]: predicates.append(_younger) results = replicacontrollers.copy() @@ -86,8 +95,8 @@ class OpenShiftAdmPruneDeployment(AnsibleOpenshiftModule): def execute_module(self): # list replicationcontroller candidate for pruning - kind = 'ReplicationController' - api_version = 'v1' + kind = "ReplicationController" + api_version = "v1" resource = self.find_resource(kind=kind, api_version=api_version, fail=True) # Get ReplicationController @@ -103,7 +112,7 @@ class OpenShiftAdmPruneDeployment(AnsibleOpenshiftModule): self.exit_json(changed=False, replication_controllers=[]) changed = True - delete_options = client.V1DeleteOptions(propagation_policy='Background') + delete_options = client.V1DeleteOptions(propagation_policy="Background") replication_controllers = [] for replica in
candidates: try: @@ -111,12 +120,18 @@ class OpenShiftAdmPruneDeployment(AnsibleOpenshiftModule): if not self.check_mode: name = replica["metadata"]["name"] namespace = replica["metadata"]["namespace"] - result = resource.delete(name=name, namespace=namespace, body=delete_options).to_dict() + result = resource.delete( + name=name, namespace=namespace, body=delete_options + ).to_dict() replication_controllers.append(result) except DynamicApiError as exc: - msg = "Failed to delete ReplicationController {namespace}/{name} due to: {msg}".format(namespace=namespace, name=name, msg=exc.body) + msg = "Failed to delete ReplicationController {namespace}/{name} due to: {msg}".format( + namespace=namespace, name=name, msg=exc.body + ) self.fail_json(msg=msg) except Exception as e: - msg = "Failed to delete ReplicationController {namespace}/{name} due to: {msg}".format(namespace=namespace, name=name, msg=to_native(e)) + msg = "Failed to delete ReplicationController {namespace}/{name} due to: {msg}".format( + namespace=namespace, name=name, msg=to_native(e) + ) self.fail_json(msg=msg) self.exit_json(changed=changed, replication_controllers=replication_controllers) diff --git a/ansible_collections/community/okd/plugins/module_utils/openshift_adm_prune_images.py b/ansible_collections/community/okd/plugins/module_utils/openshift_adm_prune_images.py index 442cf9010..768c359b6 100644 --- a/ansible_collections/community/okd/plugins/module_utils/openshift_adm_prune_images.py +++ b/ansible_collections/community/okd/plugins/module_utils/openshift_adm_prune_images.py @@ -1,17 +1,19 @@ #!/usr/bin/env python -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type from datetime import datetime, timezone, timedelta -import traceback import copy from ansible.module_utils._text import to_native from ansible.module_utils.parsing.convert_bool import boolean from ansible.module_utils.six import iteritems -from ansible_collections.community.okd.plugins.module_utils.openshift_common import AnsibleOpenshiftModule +from ansible_collections.community.okd.plugins.module_utils.openshift_common import ( + AnsibleOpenshiftModule, +) from ansible_collections.community.okd.plugins.module_utils.openshift_images_common import ( OpenShiftAnalyzeImageStream, @@ -30,7 +32,7 @@ try: from kubernetes.dynamic.exceptions import ( DynamicApiError, NotFoundError, - ApiException + ApiException, ) except ImportError: pass @@ -67,18 +69,20 @@ def determine_host_registry(module, images, image_streams): managed_images = list(filter(_f_managed_images, images)) # Be sure to pick up the newest managed image which should have an up to date information - sorted_images = sorted(managed_images, - key=lambda x: x["metadata"]["creationTimestamp"], - reverse=True) + sorted_images = sorted( + managed_images, key=lambda x: x["metadata"]["creationTimestamp"], reverse=True + ) docker_image_ref = "" if len(sorted_images) > 0: docker_image_ref = sorted_images[0].get("dockerImageReference", "") else: # 2nd try to get the pull spec from any image stream # Sorting by creation timestamp may not get us up to date info. 
Modification time would be much - sorted_image_streams = sorted(image_streams, - key=lambda x: x["metadata"]["creationTimestamp"], - reverse=True) + sorted_image_streams = sorted( + image_streams, + key=lambda x: x["metadata"]["creationTimestamp"], + reverse=True, + ) for i_stream in sorted_image_streams: docker_image_ref = i_stream["status"].get("dockerImageRepository", "") if len(docker_image_ref) > 0: @@ -88,7 +92,7 @@ def determine_host_registry(module, images, image_streams): module.exit_json(changed=False, result="no managed image found") result, error = parse_docker_image_ref(docker_image_ref, module) - return result['hostname'] + return result["hostname"] class OpenShiftAdmPruneImages(AnsibleOpenshiftModule): @@ -97,7 +101,7 @@ class OpenShiftAdmPruneImages(AnsibleOpenshiftModule): self.max_creation_timestamp = self.get_max_creation_timestamp() self._rest_client = None - self.registryhost = self.params.get('registry_url') + self.registryhost = self.params.get("registry_url") self.changed = False def list_objects(self): @@ -107,9 +111,9 @@ if self.params.get("namespace") and kind.lower() == "imagestream": namespace = self.params.get("namespace") try: - result[kind] = self.kubernetes_facts(kind=kind, - api_version=version, - namespace=namespace).get('resources') + result[kind] = self.kubernetes_facts( + kind=kind, api_version=version, namespace=namespace + ).get("resources") except DynamicApiError as e: self.fail_json( msg="An error occurred while trying to list objects.", @@ -119,7 +123,7 @@ except Exception as e: self.fail_json( msg="An error occurred while trying to list objects.", - error=to_native(e) + error=to_native(e), ) return result @@ -134,8 +138,8 @@ def rest_client(self): if not self._rest_client: configuration = copy.deepcopy(self.client.configuration) - validate_certs = self.params.get('registry_validate_certs') - ssl_ca_cert = self.params.get('registry_ca_cert') + validate_certs = self.params.get("registry_validate_certs") + ssl_ca_cert = self.params.get("registry_ca_cert") if validate_certs is not None: configuration.verify_ssl = validate_certs if ssl_ca_cert is not None: @@ -146,7 +150,9 @@ def delete_from_registry(self, url): try: - response = self.rest_client.DELETE(url=url, headers=self.client.configuration.api_key) + response = self.rest_client.DELETE( + url=url, headers=self.client.configuration.api_key + ) if response.status == 404: # Unable to delete layer return None @@ -156,8 +162,9 @@ if response.status != 202 and response.status != 204: self.fail_json( msg="Delete URL {0}: Unexpected status code in response: {1}".format( - response.status, url), - reason=response.reason + url, response.status + ), + reason=response.reason, ) return None except ApiException as e: @@ -204,9 +211,7 @@ result = self.request( "PUT", "/apis/{api_version}/namespaces/{namespace}/imagestreams/{name}/status".format( - api_version=api_version, - namespace=namespace, - name=name + api_version=api_version, namespace=namespace, name=name ), body=definition, content_type="application/json", @@ -237,11 +242,10 @@ pass except DynamicApiError as exc: self.fail_json( - msg="Failed to delete object %s/%s due to:
%s" % ( - kind, name, exc.body - ), + msg="Failed to delete object %s/%s due to: %s" + % (kind, name, exc.body), reason=exc.reason, - status=exc.status + status=exc.status, ) else: existing = resource.get(name=name) @@ -285,9 +289,11 @@ class OpenShiftAdmPruneImages(AnsibleOpenshiftModule): continue if idx == 0: - istag = "%s/%s:%s" % (stream_namespace, - stream_name, - tag_event_list["tag"]) + istag = "%s/%s:%s" % ( + stream_namespace, + stream_name, + tag_event_list["tag"], + ) if istag in self.used_tags: # keeping because tag is used filtered_items.append(item) @@ -302,20 +308,20 @@ class OpenShiftAdmPruneImages(AnsibleOpenshiftModule): image = self.image_mapping[item["image"]] # check prune over limit size - if prune_over_size_limit and not self.exceeds_limits(stream_namespace, image): + if prune_over_size_limit and not self.exceeds_limits( + stream_namespace, image + ): filtered_items.append(item) continue - image_ref = "%s/%s@%s" % (stream_namespace, - stream_name, - item["image"]) + image_ref = "%s/%s@%s" % (stream_namespace, stream_name, item["image"]) if image_ref in self.used_images: # keeping because tag is used filtered_items.append(item) continue images_to_delete.append(item["image"]) - if self.params.get('prune_registry'): + if self.params.get("prune_registry"): manifests_to_delete.append(image["metadata"]["name"]) path = stream_namespace + "/" + stream_name image_blobs, err = get_image_blobs(image) @@ -325,21 +331,25 @@ class OpenShiftAdmPruneImages(AnsibleOpenshiftModule): return filtered_items, manifests_to_delete, images_to_delete def prune_image_streams(self, stream): - name = stream['metadata']['namespace'] + "/" + stream['metadata']['name'] + name = stream["metadata"]["namespace"] + "/" + stream["metadata"]["name"] if is_too_young_object(stream, self.max_creation_timestamp): # keeping all images because of image stream too young return None, [] - facts = self.kubernetes_facts(kind="ImageStream", - api_version=ApiConfiguration.get("ImageStream"), - name=stream["metadata"]["name"], - namespace=stream["metadata"]["namespace"]) - image_stream = facts.get('resources') + facts = self.kubernetes_facts( + kind="ImageStream", + api_version=ApiConfiguration.get("ImageStream"), + name=stream["metadata"]["name"], + namespace=stream["metadata"]["namespace"], + ) + image_stream = facts.get("resources") if len(image_stream) != 1: # skipping because it does not exist anymore return None, [] stream = image_stream[0] namespace = self.params.get("namespace") - stream_to_update = not namespace or (stream["metadata"]["namespace"] == namespace) + stream_to_update = not namespace or ( + stream["metadata"]["namespace"] == namespace + ) manifests_to_delete, images_to_delete = [], [] deleted_items = False @@ -351,9 +361,9 @@ class OpenShiftAdmPruneImages(AnsibleOpenshiftModule): ( filtered_tag_event, tag_manifests_to_delete, - tag_images_to_delete + tag_images_to_delete, ) = self.prune_image_stream_tag(stream, tag_event_list) - stream['status']['tags'][idx]['items'] = filtered_tag_event + stream["status"]["tags"][idx]["items"] = filtered_tag_event manifests_to_delete += tag_manifests_to_delete images_to_delete += tag_images_to_delete deleted_items = deleted_items or (len(tag_images_to_delete) > 0) @@ -361,11 +371,11 @@ class OpenShiftAdmPruneImages(AnsibleOpenshiftModule): # Deleting tags without items tags = [] for tag in stream["status"].get("tags", []): - if tag['items'] is None or len(tag['items']) == 0: + if tag["items"] is None or len(tag["items"]) == 0: continue tags.append(tag) - 
stream['status']['tags'] = tags + stream["status"]["tags"] = tags result = None # Update ImageStream if stream_to_update: @@ -402,19 +412,23 @@ class OpenShiftAdmPruneImages(AnsibleOpenshiftModule): def execute_module(self): resources = self.list_objects() - if not self.check_mode and self.params.get('prune_registry'): + if not self.check_mode and self.params.get("prune_registry"): if not self.registryhost: - self.registryhost = determine_host_registry(self.module, resources['Image'], resources['ImageStream']) + self.registryhost = determine_host_registry( + self.module, resources["Image"], resources["ImageStream"] + ) # validate that host has a scheme if "://" not in self.registryhost: self.registryhost = "https://" + self.registryhost # Analyze Image Streams analyze_ref = OpenShiftAnalyzeImageStream( - ignore_invalid_refs=self.params.get('ignore_invalid_refs'), + ignore_invalid_refs=self.params.get("ignore_invalid_refs"), max_creation_timestamp=self.max_creation_timestamp, - module=self.module + module=self.module, + ) + self.used_tags, self.used_images, error = analyze_ref.analyze_image_stream( + resources ) - self.used_tags, self.used_images, error = analyze_ref.analyze_image_stream(resources) if error: self.fail_json(msg=error) @@ -435,16 +449,20 @@ class OpenShiftAdmPruneImages(AnsibleOpenshiftModule): updated_image_streams = [] deleted_tags_images = [] updated_is_mapping = {} - for stream in resources['ImageStream']: + for stream in resources["ImageStream"]: result, images_to_delete = self.prune_image_streams(stream) if result: - updated_is_mapping[result["metadata"]["namespace"] + "/" + result["metadata"]["name"]] = result + updated_is_mapping[ + result["metadata"]["namespace"] + "/" + result["metadata"]["name"] + ] = result updated_image_streams.append(result) deleted_tags_images += images_to_delete # Create a list with images referenced on image stream self.referenced_images = [] - for item in self.kubernetes_facts(kind="ImageStream", api_version="image.openshift.io/v1")["resources"]: + for item in self.kubernetes_facts( + kind="ImageStream", api_version="image.openshift.io/v1" + )["resources"]: name = "%s/%s" % (item["metadata"]["namespace"], item["metadata"]["name"]) if name in updated_is_mapping: item = updated_is_mapping[name] @@ -453,7 +471,7 @@ class OpenShiftAdmPruneImages(AnsibleOpenshiftModule): # Stage 2: delete images images = [] - images_to_delete = [x["metadata"]["name"] for x in resources['Image']] + images_to_delete = [x["metadata"]["name"] for x in resources["Image"]] if self.params.get("namespace") is not None: # When namespace is defined, prune only images that were referenced by ImageStream # from the corresponding namespace diff --git a/ansible_collections/community/okd/plugins/module_utils/openshift_builds.py b/ansible_collections/community/okd/plugins/module_utils/openshift_builds.py index 02e60fd2a..51ebfc281 100644 --- a/ansible_collections/community/okd/plugins/module_utils/openshift_builds.py +++ b/ansible_collections/community/okd/plugins/module_utils/openshift_builds.py @@ -1,15 +1,17 @@ #!/usr/bin/env python -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type from datetime import datetime, timezone, timedelta -import traceback import time from ansible.module_utils._text import to_native -from ansible_collections.community.okd.plugins.module_utils.openshift_common import AnsibleOpenshiftModule +from 
ansible_collections.community.okd.plugins.module_utils.openshift_common import ( + AnsibleOpenshiftModule, +) try: from kubernetes.dynamic.exceptions import DynamicApiError @@ -36,8 +38,7 @@ class OpenShiftBuilds(AnsibleOpenshiftModule): result = self.request( method="POST", path="/apis/build.openshift.io/v1/namespaces/{namespace}/builds/{name}/clone".format( - namespace=namespace, - name=name + namespace=namespace, name=name ), body=request, content_type="application/json", @@ -47,7 +48,11 @@ class OpenShiftBuilds(AnsibleOpenshiftModule): msg = "Failed to clone Build %s/%s due to: %s" % (namespace, name, exc.body) self.fail_json(msg=msg, status=exc.status, reason=exc.reason) except Exception as e: - msg = "Failed to clone Build %s/%s due to: %s" % (namespace, name, to_native(e)) + msg = "Failed to clone Build %s/%s due to: %s" % ( + namespace, + name, + to_native(e), + ) self.fail_json(msg=msg, error=to_native(e), exception=e) def instantiate_build_config(self, name, namespace, request): @@ -55,22 +60,28 @@ class OpenShiftBuilds(AnsibleOpenshiftModule): result = self.request( method="POST", path="/apis/build.openshift.io/v1/namespaces/{namespace}/buildconfigs/{name}/instantiate".format( - namespace=namespace, - name=name + namespace=namespace, name=name ), body=request, content_type="application/json", ) return result.to_dict() except DynamicApiError as exc: - msg = "Failed to instantiate BuildConfig %s/%s due to: %s" % (namespace, name, exc.body) + msg = "Failed to instantiate BuildConfig %s/%s due to: %s" % ( + namespace, + name, + exc.body, + ) self.fail_json(msg=msg, status=exc.status, reason=exc.reason) except Exception as e: - msg = "Failed to instantiate BuildConfig %s/%s due to: %s" % (namespace, name, to_native(e)) + msg = "Failed to instantiate BuildConfig %s/%s due to: %s" % ( + namespace, + name, + to_native(e), + ) self.fail_json(msg=msg, error=to_native(e), exception=e) def start_build(self): - result = None name = self.params.get("build_config_name") if not name: @@ -79,32 +90,20 @@ class OpenShiftBuilds(AnsibleOpenshiftModule): build_request = { "kind": "BuildRequest", "apiVersion": "build.openshift.io/v1", - "metadata": { - "name": name - }, - "triggeredBy": [ - {"message": "Manually triggered"} - ], + "metadata": {"name": name}, + "triggeredBy": [{"message": "Manually triggered"}], } # Overrides incremental incremental = self.params.get("incremental") if incremental is not None: build_request.update( - { - "sourceStrategyOptions": { - "incremental": incremental - } - } + {"sourceStrategyOptions": {"incremental": incremental}} ) # Environment variable if self.params.get("env_vars"): - build_request.update( - { - "env": self.params.get("env_vars") - } - ) + build_request.update({"env": self.params.get("env_vars")}) # Docker strategy option if self.params.get("build_args"): @@ -121,22 +120,14 @@ class OpenShiftBuilds(AnsibleOpenshiftModule): if no_cache is not None: build_request.update( { - "dockerStrategyOptions": { - "noCache": no_cache - }, + "dockerStrategyOptions": {"noCache": no_cache}, } ) # commit if self.params.get("commit"): build_request.update( - { - "revision": { - "git": { - "commit": self.params.get("commit") - } - } - } + {"revision": {"git": {"commit": self.params.get("commit")}}} ) if self.params.get("build_config_name"): @@ -144,7 +135,7 @@ class OpenShiftBuilds(AnsibleOpenshiftModule): result = self.instantiate_build_config( name=self.params.get("build_config_name"), namespace=self.params.get("namespace"), - request=build_request + 
request=build_request, ) else: @@ -152,7 +143,7 @@ class OpenShiftBuilds(AnsibleOpenshiftModule): result = self.clone_build( name=self.params.get("build_name"), namespace=self.params.get("namespace"), - request=build_request + request=build_request, ) if result and self.params.get("wait"): @@ -179,10 +170,11 @@ class OpenShiftBuilds(AnsibleOpenshiftModule): break elif last_status_phase in ("Cancelled", "Error", "Failed"): self.fail_json( - msg="Unexpected status for Build %s/%s: %s" % ( + msg="Unexpected status for Build %s/%s: %s" + % ( result["metadata"]["name"], result["metadata"]["namespace"], - last_status_phase + last_status_phase, ) ) time.sleep(wait_sleep) @@ -190,8 +182,11 @@ class OpenShiftBuilds(AnsibleOpenshiftModule): if last_status_phase != "Complete": name = result["metadata"]["name"] namespace = result["metadata"]["namespace"] - msg = "Build %s/%s has not complete after %d second(s)," \ "current status is %s" % (namespace, name, wait_timeout, last_status_phase) + msg = ( + "Build %s/%s has not completed after %d second(s), " + "current status is %s" + % (namespace, name, wait_timeout, last_status_phase) + ) self.fail_json(msg=msg) @@ -199,9 +194,8 @@ class OpenShiftBuilds(AnsibleOpenshiftModule): self.exit_json(changed=True, builds=result) def cancel_build(self, restart): - - kind = 'Build' - api_version = 'build.openshift.io/v1' + kind = "Build" + api_version = "build.openshift.io/v1" namespace = self.params.get("namespace") phases = ["new", "pending", "running"] @@ -215,16 +209,18 @@ class OpenShiftBuilds(AnsibleOpenshiftModule): else: build_config = self.params.get("build_config_name") # list all builds from namespace - params = dict( - kind=kind, - api_version=api_version, - namespace=namespace - ) + params = dict(kind=kind, api_version=api_version, namespace=namespace) resources = self.kubernetes_facts(**params).get("resources", []) def _filter_builds(build): - config = build["metadata"].get("labels", {}).get("openshift.io/build-config.name") - return build_config is None or (build_config is not None and config in build_config) + config = ( + build["metadata"] + .get("labels", {}) + .get("openshift.io/build-config.name") + ) + return build_config is None or ( + build_config is not None and config in build_config + ) for item in list(filter(_filter_builds, resources)): name = item["metadata"]["name"] @@ -232,16 +228,15 @@ class OpenShiftBuilds(AnsibleOpenshiftModule): names.append(name) if len(names) == 0: - self.exit_json(changed=False, msg="No Build found from namespace %s" % namespace) + self.exit_json( + changed=False, msg="No Build found from namespace %s" % namespace + ) warning = [] builds_to_cancel = [] for name in names: params = dict( - kind=kind, - api_version=api_version, - name=name, - namespace=namespace + kind=kind, api_version=api_version, name=name, namespace=namespace ) resource = self.kubernetes_facts(**params).get("resources", []) @@ -256,7 +251,10 @@ class OpenShiftBuilds(AnsibleOpenshiftModule): if phase in phases: builds_to_cancel.append(resource) else: - warning.append("build %s/%s is not in expected phase, found %s" % (namespace, name, phase)) + warning.append( + "build %s/%s is not in expected phase, found %s" + % (namespace, name, phase) + ) changed = False result = [] @@ -278,9 +276,10 @@ class OpenShiftBuilds(AnsibleOpenshiftModule): result.append(cancelled_build) except DynamicApiError as exc: self.fail_json( - msg="Failed to cancel Build %s/%s due to: %s" % (namespace, name, exc), + msg="Failed to cancel Build %s/%s due to: %s" + % 
(namespace, name, exc), reason=exc.reason, - status=exc.status + status=exc.status, ) except Exception as e: self.fail_json( @@ -294,10 +293,7 @@ class OpenShiftBuilds(AnsibleOpenshiftModule): name = build["metadata"]["name"] while (datetime.now() - start).seconds < wait_timeout: params = dict( - kind=kind, - api_version=api_version, - name=name, - namespace=namespace + kind=kind, api_version=api_version, name=name, namespace=namespace ) resource = self.kubernetes_facts(**params).get("resources", []) if len(resource) == 0: @@ -307,7 +303,11 @@ class OpenShiftBuilds(AnsibleOpenshiftModule): if last_phase == "Cancelled": return resource, None time.sleep(wait_sleep) - return None, "Build %s/%s is not cancelled as expected, current state is %s" % (namespace, name, last_phase) + return ( + None, + "Build %s/%s is not cancelled as expected, current state is %s" + % (namespace, name, last_phase), + ) if result and self.params.get("wait"): wait_timeout = self.params.get("wait_timeout") @@ -341,8 +341,8 @@ class OpenShiftPruneBuilds(OpenShiftBuilds): def execute_module(self): # list Build candidates for pruning - kind = 'Build' - api_version = 'build.openshift.io/v1' + kind = "Build" + api_version = "build.openshift.io/v1" resource = self.find_resource(kind=kind, api_version=api_version, fail=True) self.max_creation_timestamp = None @@ -352,7 +352,12 @@ class OpenShiftPruneBuilds(OpenShiftBuilds): self.max_creation_timestamp = now - timedelta(minutes=keep_younger_than) def _prunable_build(build): - return build["status"]["phase"] in ("Complete", "Failed", "Error", "Cancelled") + return build["status"]["phase"] in ( + "Complete", + "Failed", + "Error", + "Cancelled", + ) def _orphan_build(build): if not _prunable_build(build): @@ -367,7 +372,9 @@ class OpenShiftPruneBuilds(OpenShiftBuilds): def _younger_build(build): if not self.max_creation_timestamp: return False - creation_timestamp = datetime.strptime(build['metadata']['creationTimestamp'], '%Y-%m-%dT%H:%M:%SZ') + creation_timestamp = datetime.strptime( + build["metadata"]["creationTimestamp"], "%Y-%m-%dT%H:%M:%SZ" + ) return creation_timestamp < self.max_creation_timestamp predicates = [ @@ -401,9 +408,17 @@ class OpenShiftPruneBuilds(OpenShiftBuilds): namespace = build["metadata"]["namespace"] resource.delete(name=name, namespace=namespace, body={}) except DynamicApiError as exc: - msg = "Failed to delete Build %s/%s due to: %s" % (namespace, name, exc.body) + msg = "Failed to delete Build %s/%s due to: %s" % ( + namespace, + name, + exc.body, + ) self.fail_json(msg=msg, status=exc.status, reason=exc.reason) except Exception as e: - msg = "Failed to delete Build %s/%s due to: %s" % (namespace, name, to_native(e)) + msg = "Failed to delete Build %s/%s due to: %s" % ( + namespace, + name, + to_native(e), + ) self.fail_json(msg=msg, error=to_native(e), exception=e) self.exit_json(changed=changed, builds=candidates) diff --git a/ansible_collections/community/okd/plugins/module_utils/openshift_common.py b/ansible_collections/community/okd/plugins/module_utils/openshift_common.py index a1318f9a5..b818b7a7b 100644 --- a/ansible_collections/community/okd/plugins/module_utils/openshift_common.py +++ b/ansible_collections/community/okd/plugins/module_utils/openshift_common.py @@ -1,6 +1,7 @@ #!/usr/bin/env python -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type import traceback @@ -9,8 +10,12 @@ from abc import abstractmethod from 
ansible.module_utils._text import to_native try: - from ansible_collections.kubernetes.core.plugins.module_utils.k8s.client import get_api_client - from ansible_collections.kubernetes.core.plugins.module_utils.k8s.core import AnsibleK8SModule + from ansible_collections.kubernetes.core.plugins.module_utils.k8s.client import ( + get_api_client, + ) + from ansible_collections.kubernetes.core.plugins.module_utils.k8s.core import ( + AnsibleK8SModule, + ) from ansible_collections.kubernetes.core.plugins.module_utils.k8s.service import ( K8sService, diff_objects, @@ -24,7 +29,10 @@ try: merge_params, flatten_list_kind, ) - from ansible_collections.kubernetes.core.plugins.module_utils.k8s.exceptions import CoreException + from ansible_collections.kubernetes.core.plugins.module_utils.k8s.exceptions import ( + CoreException, + ) + HAS_KUBERNETES_COLLECTION = True k8s_collection_import_exception = None K8S_COLLECTION_ERROR = None @@ -35,7 +43,6 @@ except ImportError as e: class AnsibleOpenshiftModule(AnsibleK8SModule): - def __init__(self, **kwargs): super(AnsibleOpenshiftModule, self).__init__(**kwargs) @@ -86,7 +93,6 @@ class AnsibleOpenshiftModule(AnsibleK8SModule): return diff_objects(existing, new) def run_module(self): - try: self.execute_module() except CoreException as e: diff --git a/ansible_collections/community/okd/plugins/module_utils/openshift_docker_image.py b/ansible_collections/community/okd/plugins/module_utils/openshift_docker_image.py index 27dbe6cc7..160c5b50b 100644 --- a/ansible_collections/community/okd/plugins/module_utils/openshift_docker_image.py +++ b/ansible_collections/community/okd/plugins/module_utils/openshift_docker_image.py @@ -1,6 +1,7 @@ #!/usr/bin/env python -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type import re @@ -23,62 +24,68 @@ def convert_storage_to_bytes(value): def is_valid_digest(digest): - digest_algorithm_size = dict( - sha256=64, sha384=96, sha512=128, + sha256=64, + sha384=96, + sha512=128, ) - m = re.match(r'[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+', digest) + m = re.match(r"[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+", digest) if not m: return "Docker digest does not match expected format %s" % digest - idx = digest.find(':') + idx = digest.find(":") # case: "sha256:" with no hex. if idx < 0 or idx == (len(digest) - 1): return "Invalid docker digest %s, no hex value define" % digest algorithm = digest[:idx] if algorithm not in digest_algorithm_size: - return "Unsupported digest algorithm value %s for digest %s" % (algorithm, digest) + return "Unsupported digest algorithm value %s for digest %s" % ( + algorithm, + digest, + ) - hex_value = digest[idx + 1:] + hex_value = digest[idx + 1:] # fmt: skip if len(hex_value) != digest_algorithm_size.get(algorithm): return "Invalid length for digest hex expected %d found %d (digest is %s)" % ( - digest_algorithm_size.get(algorithm), len(hex_value), digest + digest_algorithm_size.get(algorithm), + len(hex_value), + digest, ) def parse_docker_image_ref(image_ref, module=None): """ - Docker Grammar Reference - Reference => name [ ":" tag ] [ "@" digest ] - name => [hostname '/'] component ['/' component]* - hostname => hostcomponent ['.' 
hostcomponent]* [':' port-number] - hostcomponent => /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ - port-number => /[0-9]+/ - component => alpha-numeric [separator alpha-numeric]* - alpha-numeric => /[a-z0-9]+/ - separator => /[_.]|__|[-]*/ + Docker Grammar Reference + Reference => name [ ":" tag ] [ "@" digest ] + name => [hostname '/'] component ['/' component]* + hostname => hostcomponent ['.' hostcomponent]* [':' port-number] + hostcomponent => /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ + port-number => /[0-9]+/ + component => alpha-numeric [separator alpha-numeric]* + alpha-numeric => /[a-z0-9]+/ + separator => /[_.]|__|[-]*/ """ idx = image_ref.find("/") def _contains_any(src, values): return any(x in src for x in values) - result = { - "tag": None, "digest": None - } + result = {"tag": None, "digest": None} default_domain = "docker.io" - if idx < 0 or (not _contains_any(image_ref[:idx], ":.") and image_ref[:idx] != "localhost"): + if idx < 0 or ( + not _contains_any(image_ref[:idx], ":.") and image_ref[:idx] != "localhost" + ): result["hostname"], remainder = default_domain, image_ref else: - result["hostname"], remainder = image_ref[:idx], image_ref[idx + 1:] + result["hostname"], remainder = image_ref[:idx], image_ref[idx + 1:] # fmt: skip # Parse remainder information idx = remainder.find("@") if idx > 0 and len(remainder) > (idx + 1): # docker image reference with digest - component, result["digest"] = remainder[:idx], remainder[idx + 1:] + component, result["digest"] = remainder[:idx], remainder[idx + 1:] # fmt: skip err = is_valid_digest(result["digest"]) if err: if module: @@ -88,7 +95,7 @@ def parse_docker_image_ref(image_ref, module=None): idx = remainder.find(":") if idx > 0 and len(remainder) > (idx + 1): # docker image reference with tag - component, result["tag"] = remainder[:idx], remainder[idx + 1:] + component, result["tag"] = remainder[:idx], remainder[idx + 1:] # fmt: skip else: # name only component = remainder @@ -96,8 +103,6 @@ def parse_docker_image_ref(image_ref, module=None): namespace = None if len(v) > 1: namespace = v[0] - result.update({ - "namespace": namespace, "name": v[-1] - }) + result.update({"namespace": namespace, "name": v[-1]}) return result, None diff --git a/ansible_collections/community/okd/plugins/module_utils/openshift_groups.py b/ansible_collections/community/okd/plugins/module_utils/openshift_groups.py index 5d1aaadc1..473a14f7e 100644 --- a/ansible_collections/community/okd/plugins/module_utils/openshift_groups.py +++ b/ansible_collections/community/okd/plugins/module_utils/openshift_groups.py @@ -3,11 +3,11 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type -import traceback from datetime import datetime from ansible.module_utils.parsing.convert_bool import boolean @@ -19,18 +19,21 @@ from ansible_collections.community.okd.plugins.module_utils.openshift_ldap impor ldap_split_host_port, OpenshiftLDAPRFC2307, OpenshiftLDAPActiveDirectory, - OpenshiftLDAPAugmentedActiveDirectory + OpenshiftLDAPAugmentedActiveDirectory, ) try: import ldap + HAS_PYTHON_LDAP = True PYTHON_LDAP_ERROR = None except ImportError as e: HAS_PYTHON_LDAP = False PYTHON_LDAP_ERROR = e -from ansible_collections.community.okd.plugins.module_utils.openshift_common import AnsibleOpenshiftModule +from 
ansible_collections.community.okd.plugins.module_utils.openshift_common import ( + AnsibleOpenshiftModule, +) try: from kubernetes.dynamic.exceptions import DynamicApiError @@ -44,7 +47,9 @@ LDAP_OPENSHIFT_UID_ANNOTATION = "openshift.io/ldap.uid" LDAP_OPENSHIFT_SYNCTIME_ANNOTATION = "openshift.io/ldap.sync-time" -def connect_to_ldap(module, server_uri, bind_dn=None, bind_pw=None, insecure=True, ca_file=None): +def connect_to_ldap( + module, server_uri, bind_dn=None, bind_pw=None, insecure=True, ca_file=None +): if insecure: ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER) elif ca_file: @@ -56,27 +61,36 @@ def connect_to_ldap(module, server_uri, bind_dn=None, bind_pw=None, insecure=Tru connection.simple_bind_s(bind_dn, bind_pw) return connection except ldap.LDAPError as e: - module.fail_json(msg="Cannot bind to the LDAP server '{0}' due to: {1}".format(server_uri, e)) + module.fail_json( + msg="Cannot bind to the LDAP server '{0}' due to: {1}".format(server_uri, e) + ) def validate_group_annotation(definition, host_ip): - name = definition['metadata']['name'] + name = definition["metadata"]["name"] # Validate LDAP URL Annotation - annotate_url = definition['metadata'].get('annotations', {}).get(LDAP_OPENSHIFT_URL_ANNOTATION) + annotate_url = ( + definition["metadata"].get("annotations", {}).get(LDAP_OPENSHIFT_URL_ANNOTATION) + ) if host_ip: if not annotate_url: - return "group '{0}' marked as having been synced did not have an '{1}' annotation".format(name, LDAP_OPENSHIFT_URL_ANNOTATION) + return "group '{0}' marked as having been synced did not have an '{1}' annotation".format( + name, LDAP_OPENSHIFT_URL_ANNOTATION + ) elif annotate_url != host_ip: return "group '{0}' was not synchronized from: '{1}'".format(name, host_ip) # Validate LDAP UID Annotation - annotate_uid = definition['metadata']['annotations'].get(LDAP_OPENSHIFT_UID_ANNOTATION) + annotate_uid = definition["metadata"]["annotations"].get( + LDAP_OPENSHIFT_UID_ANNOTATION + ) if not annotate_uid: - return "group '{0}' marked as having been synced did not have an '{1}' annotation".format(name, LDAP_OPENSHIFT_UID_ANNOTATION) + return "group '{0}' marked as having been synced did not have an '{1}' annotation".format( + name, LDAP_OPENSHIFT_UID_ANNOTATION + ) return None class OpenshiftLDAPGroups(object): - kind = "Group" version = "user.openshift.io/v1" @@ -88,11 +102,7 @@ class OpenshiftLDAPGroups(object): @property def k8s_group_api(self): if not self.__group_api: - params = dict( - kind=self.kind, - api_version=self.version, - fail=True - ) + params = dict(kind=self.kind, api_version=self.version, fail=True) self.__group_api = self.module.find_resource(**params) return self.__group_api @@ -139,16 +149,26 @@ class OpenshiftLDAPGroups(object): if missing: self.module.fail_json( - msg="The following groups were not found: %s" % ''.join(missing) + msg="The following groups were not found: %s" % "".join(missing) ) else: label_selector = "%s=%s" % (LDAP_OPENSHIFT_HOST_LABEL, host) - resources = self.get_group_info(label_selectors=[label_selector], return_list=True) + resources = self.get_group_info( + label_selectors=[label_selector], return_list=True + ) if not resources: - return None, "Unable to find Group matching label selector '%s'" % label_selector + return ( + None, + "Unable to find Group matching label selector '%s'" + % label_selector, + ) groups = resources if deny_groups: - groups = [item for item in groups if item["metadata"]["name"] not in deny_groups] + groups = [ + item + for item in groups + if 
item["metadata"]["name"] not in deny_groups + ] uids = [] for grp in groups: @@ -156,7 +176,9 @@ class OpenshiftLDAPGroups(object): if err and allow_groups: # We raise an error for group part of the allow_group not matching LDAP sync criteria return None, err - group_uid = grp['metadata']['annotations'].get(LDAP_OPENSHIFT_UID_ANNOTATION) + group_uid = grp["metadata"]["annotations"].get( + LDAP_OPENSHIFT_UID_ANNOTATION + ) self.cache[group_uid] = grp uids.append(group_uid) return uids, None @@ -174,38 +196,65 @@ class OpenshiftLDAPGroups(object): "kind": "Group", "metadata": { "name": group_name, - "labels": { - LDAP_OPENSHIFT_HOST_LABEL: self.module.host - }, + "labels": {LDAP_OPENSHIFT_HOST_LABEL: self.module.host}, "annotations": { LDAP_OPENSHIFT_URL_ANNOTATION: self.module.netlocation, LDAP_OPENSHIFT_UID_ANNOTATION: group_uid, - } - } + }, + }, } # Make sure we aren't taking over an OpenShift group that is already related to a different LDAP group - ldaphost_label = group["metadata"].get("labels", {}).get(LDAP_OPENSHIFT_HOST_LABEL) + ldaphost_label = ( + group["metadata"].get("labels", {}).get(LDAP_OPENSHIFT_HOST_LABEL) + ) if not ldaphost_label or ldaphost_label != self.module.host: - return None, "Group %s: %s label did not match sync host: wanted %s, got %s" % ( - group_name, LDAP_OPENSHIFT_HOST_LABEL, self.module.host, ldaphost_label + return ( + None, + "Group %s: %s label did not match sync host: wanted %s, got %s" + % ( + group_name, + LDAP_OPENSHIFT_HOST_LABEL, + self.module.host, + ldaphost_label, + ), ) - ldapurl_annotation = group["metadata"].get("annotations", {}).get(LDAP_OPENSHIFT_URL_ANNOTATION) + ldapurl_annotation = ( + group["metadata"].get("annotations", {}).get(LDAP_OPENSHIFT_URL_ANNOTATION) + ) if not ldapurl_annotation or ldapurl_annotation != self.module.netlocation: - return None, "Group %s: %s annotation did not match sync host: wanted %s, got %s" % ( - group_name, LDAP_OPENSHIFT_URL_ANNOTATION, self.module.netlocation, ldapurl_annotation + return ( + None, + "Group %s: %s annotation did not match sync host: wanted %s, got %s" + % ( + group_name, + LDAP_OPENSHIFT_URL_ANNOTATION, + self.module.netlocation, + ldapurl_annotation, + ), ) - ldapuid_annotation = group["metadata"].get("annotations", {}).get(LDAP_OPENSHIFT_UID_ANNOTATION) + ldapuid_annotation = ( + group["metadata"].get("annotations", {}).get(LDAP_OPENSHIFT_UID_ANNOTATION) + ) if not ldapuid_annotation or ldapuid_annotation != group_uid: - return None, "Group %s: %s annotation did not match LDAP UID: wanted %s, got %s" % ( - group_name, LDAP_OPENSHIFT_UID_ANNOTATION, group_uid, ldapuid_annotation + return ( + None, + "Group %s: %s annotation did not match LDAP UID: wanted %s, got %s" + % ( + group_name, + LDAP_OPENSHIFT_UID_ANNOTATION, + group_uid, + ldapuid_annotation, + ), ) # Overwrite Group Users data group["users"] = usernames - group["metadata"]["annotations"][LDAP_OPENSHIFT_SYNCTIME_ANNOTATION] = datetime.now().isoformat() + group["metadata"]["annotations"][ + LDAP_OPENSHIFT_SYNCTIME_ANNOTATION + ] = datetime.now().isoformat() return group, None def create_openshift_groups(self, groups: list): @@ -223,9 +272,15 @@ class OpenshiftLDAPGroups(object): else: definition = self.k8s_group_api.create(definition).to_dict() except DynamicApiError as exc: - self.module.fail_json(msg="Failed to %s Group '%s' due to: %s" % (method, name, exc.body)) + self.module.fail_json( + msg="Failed to %s Group '%s' due to: %s" + % (method, name, exc.body) + ) except Exception as exc: - self.module.fail_json(msg="Failed 
to %s Group '%s' due to: %s" % (method, name, to_native(exc))) + self.module.fail_json( + msg="Failed to %s Group '%s' due to: %s" + % (method, name, to_native(exc)) + ) equals = False if existing: equals, diff = self.module.diff_objects(existing, definition) @@ -235,27 +290,27 @@ class OpenshiftLDAPGroups(object): return results, diffs, changed def delete_openshift_group(self, name: str): - result = dict( - kind=self.kind, - apiVersion=self.version, - metadata=dict( - name=name - ) - ) + result = dict(kind=self.kind, apiVersion=self.version, metadata=dict(name=name)) if not self.module.check_mode: try: result = self.k8s_group_api.delete(name=name).to_dict() except DynamicApiError as exc: - self.module.fail_json(msg="Failed to delete Group '{0}' due to: {1}".format(name, exc.body)) + self.module.fail_json( + msg="Failed to delete Group '{0}' due to: {1}".format( + name, exc.body + ) + ) except Exception as exc: - self.module.fail_json(msg="Failed to delete Group '{0}' due to: {1}".format(name, to_native(exc))) + self.module.fail_json( + msg="Failed to delete Group '{0}' due to: {1}".format( + name, to_native(exc) + ) + ) return result class OpenshiftGroupsSync(AnsibleOpenshiftModule): - def __init__(self, **kwargs): - super(OpenshiftGroupsSync, self).__init__(**kwargs) self.__k8s_group_api = None self.__ldap_connection = None @@ -267,17 +322,14 @@ class OpenshiftGroupsSync(AnsibleOpenshiftModule): if not HAS_PYTHON_LDAP: self.fail_json( - msg=missing_required_lib('python-ldap'), error=to_native(PYTHON_LDAP_ERROR) + msg=missing_required_lib("python-ldap"), + error=to_native(PYTHON_LDAP_ERROR), ) @property def k8s_group_api(self): if not self.__k8s_group_api: - params = dict( - kind="Group", - api_version="user.openshift.io/v1", - fail=True - ) + params = dict(kind="Group", api_version="user.openshift.io/v1", fail=True) self.__k8s_group_api = self.find_resource(**params) return self.__k8s_group_api @@ -291,11 +343,11 @@ class OpenshiftGroupsSync(AnsibleOpenshiftModule): # Create connection object params = dict( module=self, - server_uri=self.config.get('url'), - bind_dn=self.config.get('bindDN'), - bind_pw=self.config.get('bindPassword'), - insecure=boolean(self.config.get('insecure')), - ca_file=self.config.get('ca') + server_uri=self.config.get("url"), + bind_dn=self.config.get("bindDN"), + bind_pw=self.config.get("bindPassword"), + insecure=boolean(self.config.get("insecure")), + ca_file=self.config.get("ca"), ) self.__ldap_connection = connect_to_ldap(**params) return self.__ldap_connection @@ -327,7 +379,6 @@ class OpenshiftGroupsSync(AnsibleOpenshiftModule): return syncer def synchronize(self): - sync_group_type = self.module.params.get("type") groups_uids = [] @@ -365,7 +416,8 @@ class OpenshiftGroupsSync(AnsibleOpenshiftModule): name, err = syncer.get_username_for_entry(entry) if err: self.exit_json( - msg="Unable to determine username for entry %s: %s" % (entry, err) + msg="Unable to determine username for entry %s: %s" + % (entry, err) ) if isinstance(name, list): usernames.extend(name) @@ -380,13 +432,17 @@ class OpenshiftGroupsSync(AnsibleOpenshiftModule): self.exit_json(msg=err) # Make Openshift group - group, err = ldap_openshift_group.make_openshift_group(uid, group_name, usernames) + group, err = ldap_openshift_group.make_openshift_group( + uid, group_name, usernames + ) if err: self.fail_json(msg=err) openshift_groups.append(group) # Create Openshift Groups - results, diffs, changed = ldap_openshift_group.create_openshift_groups(openshift_groups) + results, diffs, changed = 
ldap_openshift_group.create_openshift_groups( + openshift_groups + ) self.module.exit_json(changed=True, groups=results) def prune(self): @@ -404,7 +460,10 @@ class OpenshiftGroupsSync(AnsibleOpenshiftModule): # Check if LDAP group exist exists, err = syncer.is_ldapgroup_exists(uid) if err: - msg = "Error determining LDAP group existence for group %s: %s" % (uid, err) + msg = "Error determining LDAP group existence for group %s: %s" % ( + uid, + err, + ) self.module.fail_json(msg=msg) if exists: @@ -429,14 +488,22 @@ class OpenshiftGroupsSync(AnsibleOpenshiftModule): self.fail_json(msg="Invalid LDAP Sync config: %s" % error) # Split host/port - if self.config.get('url'): - result, error = ldap_split_host_port(self.config.get('url')) + if self.config.get("url"): + result, error = ldap_split_host_port(self.config.get("url")) if error: - self.fail_json(msg="Failed to parse url='{0}': {1}".format(self.config.get('url'), error)) - self.netlocation, self.host, self.port = result["netlocation"], result["host"], result["port"] + self.fail_json( + msg="Failed to parse url='{0}': {1}".format( + self.config.get("url"), error + ) + ) + self.netlocation, self.host, self.port = ( + result["netlocation"], + result["host"], + result["port"], + ) self.scheme = result["scheme"] - if self.params.get('state') == 'present': + if self.params.get("state") == "present": self.synchronize() else: self.prune() diff --git a/ansible_collections/community/okd/plugins/module_utils/openshift_images_common.py b/ansible_collections/community/okd/plugins/module_utils/openshift_images_common.py index 67d7123e8..8978a93b0 100644 --- a/ansible_collections/community/okd/plugins/module_utils/openshift_images_common.py +++ b/ansible_collections/community/okd/plugins/module_utils/openshift_images_common.py @@ -1,6 +1,7 @@ #!/usr/bin/env python -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type from datetime import datetime @@ -17,9 +18,9 @@ def get_image_blobs(image): return blobs, "failed to read metadata for image %s" % image["metadata"]["name"] media_type_manifest = ( "application/vnd.docker.distribution.manifest.v2+json", - "application/vnd.oci.image.manifest.v1+json" + "application/vnd.oci.image.manifest.v1+json", ) - media_type_has_config = image['dockerImageManifestMediaType'] in media_type_manifest + media_type_has_config = image["dockerImageManifestMediaType"] in media_type_manifest docker_image_id = docker_image_metadata.get("Id") if media_type_has_config and docker_image_id and len(docker_image_id) > 0: blobs.append(docker_image_id) @@ -29,19 +30,18 @@ def get_image_blobs(image): def is_created_after(creation_timestamp, max_creation_timestamp): if not max_creation_timestamp: return False - creationTimestamp = datetime.strptime(creation_timestamp, '%Y-%m-%dT%H:%M:%SZ') + creationTimestamp = datetime.strptime(creation_timestamp, "%Y-%m-%dT%H:%M:%SZ") return creationTimestamp > max_creation_timestamp def is_too_young_object(obj, max_creation_timestamp): - return is_created_after(obj['metadata']['creationTimestamp'], - max_creation_timestamp) + return is_created_after( + obj["metadata"]["creationTimestamp"], max_creation_timestamp + ) class OpenShiftAnalyzeImageStream(object): - def __init__(self, ignore_invalid_refs, max_creation_timestamp, module): - self.max_creationTimestamp = max_creation_timestamp self.used_tags = {} self.used_images = {} @@ -53,32 +53,34 @@ class OpenShiftAnalyzeImageStream(object): if error: 
return error - if not result['hostname'] or not result['namespace']: + if not result["hostname"] or not result["namespace"]: # image reference does not match hostname/namespace/name pattern - skipping return None - if not result['digest']: + if not result["digest"]: # Attempt to dereference istag. Since we cannot be sure whether the reference refers to the # integrated registry or not, we ignore the host part completely. As a consequence, we may keep # image otherwise sentenced for a removal just because its pull spec accidentally matches one of # our imagestreamtags. # set the tag if empty - if result['tag'] == "": - result['tag'] = 'latest' - key = "%s/%s:%s" % (result['namespace'], result['name'], result['tag']) + if result["tag"] == "": + result["tag"] = "latest" + key = "%s/%s:%s" % (result["namespace"], result["name"], result["tag"]) if key not in self.used_tags: self.used_tags[key] = [] self.used_tags[key].append(referrer) else: - key = "%s/%s@%s" % (result['namespace'], result['name'], result['digest']) + key = "%s/%s@%s" % (result["namespace"], result["name"], result["digest"]) if key not in self.used_images: self.used_images[key] = [] self.used_images[key].append(referrer) def analyze_refs_from_pod_spec(self, podSpec, referrer): - for container in podSpec.get('initContainers', []) + podSpec.get('containers', []): - image = container.get('image') + for container in podSpec.get("initContainers", []) + podSpec.get( + "containers", [] + ): + image = container.get("image") if len(image.strip()) == 0: # Ignoring container because it has no reference to image continue @@ -93,29 +95,35 @@ class OpenShiftAnalyzeImageStream(object): # pending or running. Additionally, it has to be at least as old as the minimum # age threshold defined by the algorithm. 
too_young = is_too_young_object(pod, self.max_creationTimestamp) - if pod['status']['phase'] not in ("Running", "Pending") and too_young: + if pod["status"]["phase"] not in ("Running", "Pending") and too_young: continue referrer = { "kind": pod["kind"], "namespace": pod["metadata"]["namespace"], "name": pod["metadata"]["name"], } - err = self.analyze_refs_from_pod_spec(pod['spec'], referrer) + err = self.analyze_refs_from_pod_spec(pod["spec"], referrer) if err: return err return None def analyze_refs_pod_creators(self, resources): keys = ( - "ReplicationController", "DeploymentConfig", "DaemonSet", - "Deployment", "ReplicaSet", "StatefulSet", "Job", "CronJob" + "ReplicationController", + "DeploymentConfig", + "DaemonSet", + "Deployment", + "ReplicaSet", + "StatefulSet", + "Job", + "CronJob", ) for k, objects in iteritems(resources): if k not in keys: continue for obj in objects: - if k == 'CronJob': + if k == "CronJob": spec = obj["spec"]["jobTemplate"]["spec"]["template"]["spec"] else: spec = obj["spec"]["template"]["spec"] @@ -132,64 +140,84 @@ class OpenShiftAnalyzeImageStream(object): def analyze_refs_from_strategy(self, build_strategy, namespace, referrer): # Determine 'from' reference def _determine_source_strategy(): - for src in ('sourceStrategy', 'dockerStrategy', 'customStrategy'): + for src in ("sourceStrategy", "dockerStrategy", "customStrategy"): strategy = build_strategy.get(src) if strategy: - return strategy.get('from') + return strategy.get("from") return None def _parse_image_stream_image_name(name): - v = name.split('@') + v = name.split("@") if len(v) != 2: - return None, None, "expected exactly one @ in the isimage name %s" % name + return ( + None, + None, + "expected exactly one @ in the isimage name %s" % name, + ) name = v[0] tag = v[1] if len(name) == 0 or len(tag) == 0: - return None, None, "image stream image name %s must have a name and ID" % name + return ( + None, + None, + "image stream image name %s must have a name and ID" % name, + ) return name, tag, None def _parse_image_stream_tag_name(name): if "@" in name: - return None, None, "%s is an image stream image, not an image stream tag" % name + return ( + None, + None, + "%s is an image stream image, not an image stream tag" % name, + ) v = name.split(":") if len(v) != 2: - return None, None, "expected exactly one : delimiter in the istag %s" % name + return ( + None, + None, + "expected exactly one : delimiter in the istag %s" % name, + ) name = v[0] tag = v[1] if len(name) == 0 or len(tag) == 0: - return None, None, "image stream tag name %s must have a name and a tag" % name + return ( + None, + None, + "image stream tag name %s must have a name and a tag" % name, + ) return name, tag, None from_strategy = _determine_source_strategy() if from_strategy: - if from_strategy.get('kind') == "DockerImage": - docker_image_ref = from_strategy.get('name').strip() + if from_strategy.get("kind") == "DockerImage": + docker_image_ref = from_strategy.get("name").strip() if len(docker_image_ref) > 0: err = self.analyze_reference_image(docker_image_ref, referrer) - elif from_strategy.get('kind') == "ImageStreamImage": - name, tag, error = _parse_image_stream_image_name(from_strategy.get('name')) + elif from_strategy.get("kind") == "ImageStreamImage": + name, tag, error = _parse_image_stream_image_name( + from_strategy.get("name") + ) if error: if not self.ignore_invalid_refs: return error else: - namespace = from_strategy.get('namespace') or namespace - self.used_images.append({ - 'namespace': namespace, - 'name': 
name, - 'tag': tag - }) - elif from_strategy.get('kind') == "ImageStreamTag": - name, tag, error = _parse_image_stream_tag_name(from_strategy.get('name')) + namespace = from_strategy.get("namespace") or namespace + self.used_images.append( + {"namespace": namespace, "name": name, "tag": tag} + ) + elif from_strategy.get("kind") == "ImageStreamTag": + name, tag, error = _parse_image_stream_tag_name( + from_strategy.get("name") + ) if error: if not self.ignore_invalid_refs: return error else: - namespace = from_strategy.get('namespace') or namespace - self.used_tags.append({ - 'namespace': namespace, - 'name': name, - 'tag': tag - }) + namespace = from_strategy.get("namespace") or namespace + self.used_tags.append( + {"namespace": namespace, "name": name, "tag": tag} + ) def analyze_refs_from_build_strategy(self, resources): # Json Path is always spec.strategy @@ -203,16 +231,20 @@ class OpenShiftAnalyzeImageStream(object): "namespace": obj["metadata"]["namespace"], "name": obj["metadata"]["name"], } - error = self.analyze_refs_from_strategy(obj['spec']['strategy'], - obj['metadata']['namespace'], - referrer) + error = self.analyze_refs_from_strategy( + obj["spec"]["strategy"], obj["metadata"]["namespace"], referrer + ) if error is not None: - return "%s/%s/%s: %s" % (referrer["kind"], referrer["namespace"], referrer["name"], error) + return "%s/%s/%s: %s" % ( + referrer["kind"], + referrer["namespace"], + referrer["name"], + error, + ) def analyze_image_stream(self, resources): - # Analyze image reference from Pods - error = self.analyze_refs_from_pods(resources['Pod']) + error = self.analyze_refs_from_pods(resources["Pod"]) if error: return None, None, error diff --git a/ansible_collections/community/okd/plugins/module_utils/openshift_import_image.py b/ansible_collections/community/okd/plugins/module_utils/openshift_import_image.py index 01bba82af..c9953d761 100644 --- a/ansible_collections/community/okd/plugins/module_utils/openshift_import_image.py +++ b/ansible_collections/community/okd/plugins/module_utils/openshift_import_image.py @@ -1,16 +1,17 @@ #!/usr/bin/env python -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type -import traceback import copy -from ansible.module_utils._text import to_native from ansible.module_utils.parsing.convert_bool import boolean from ansible.module_utils.six import string_types -from ansible_collections.community.okd.plugins.module_utils.openshift_common import AnsibleOpenshiftModule +from ansible_collections.community.okd.plugins.module_utils.openshift_common import ( + AnsibleOpenshiftModule, +) try: from kubernetes.dynamic.exceptions import DynamicApiError @@ -44,10 +45,17 @@ def follow_imagestream_tag_reference(stream, tag): return name, tag, len(parts) == 2 content = [] - err_cross_stream_ref = "tag %s points to an imagestreamtag from another ImageStream" % tag + err_cross_stream_ref = ( + "tag %s points to an imagestreamtag from another ImageStream" % tag + ) while True: if tag in content: - return tag, None, multiple, "tag %s on the image stream is a reference to same tag" % tag + return ( + tag, + None, + multiple, + "tag %s on the image stream is a reference to same tag" % tag, + ) content.append(tag) tag_ref = _imagestream_has_tag() if not tag_ref: @@ -56,7 +64,10 @@ def follow_imagestream_tag_reference(stream, tag): if not tag_ref.get("from") or tag_ref["from"]["kind"] != "ImageStreamTag": return tag, tag_ref, multiple, None - if 
tag_ref["from"]["namespace"] != "" and tag_ref["from"]["namespace"] != stream["metadata"]["namespace"]: + if ( + tag_ref["from"]["namespace"] != "" + and tag_ref["from"]["namespace"] != stream["metadata"]["namespace"] + ): return tag, None, multiple, err_cross_stream_ref # The reference needs to be followed with two format patterns: @@ -64,7 +75,12 @@ def follow_imagestream_tag_reference(stream, tag): if ":" in tag_ref["from"]["name"]: name, tagref, result = _imagestream_split_tag(tag_ref["from"]["name"]) if not result: - return tag, None, multiple, "tag %s points to an invalid imagestreamtag" % tag + return ( + tag, + None, + multiple, + "tag %s points to an invalid imagestreamtag" % tag, + ) if name != stream["metadata"]["namespace"]: # anotheris:sometag - this should not happen. return tag, None, multiple, err_cross_stream_ref @@ -80,7 +96,7 @@ class OpenShiftImportImage(AnsibleOpenshiftModule): super(OpenShiftImportImage, self).__init__(**kwargs) self._rest_client = None - self.registryhost = self.params.get('registry_url') + self.registryhost = self.params.get("registry_url") self.changed = False ref_policy = self.params.get("reference_policy") @@ -90,9 +106,7 @@ class OpenShiftImportImage(AnsibleOpenshiftModule): elif ref_policy == "local": ref_policy_type = "Local" - self.ref_policy = { - "type": ref_policy_type - } + self.ref_policy = {"type": ref_policy_type} self.validate_certs = self.params.get("validate_registry_certs") self.cluster_resources = {} @@ -104,15 +118,15 @@ class OpenShiftImportImage(AnsibleOpenshiftModule): "metadata": { "name": stream["metadata"]["name"], "namespace": stream["metadata"]["namespace"], - "resourceVersion": stream["metadata"].get("resourceVersion") + "resourceVersion": stream["metadata"].get("resourceVersion"), }, - "spec": { - "import": True - } + "spec": {"import": True}, } annotations = stream.get("annotations", {}) - insecure = boolean(annotations.get("openshift.io/image.insecureRepository", True)) + insecure = boolean( + annotations.get("openshift.io/image.insecureRepository", True) + ) if self.validate_certs is not None: insecure = not self.validate_certs return isi, insecure @@ -126,7 +140,7 @@ class OpenShiftImportImage(AnsibleOpenshiftModule): }, "importPolicy": { "insecure": insecure, - "scheduled": self.params.get("scheduled") + "scheduled": self.params.get("scheduled"), }, "referencePolicy": self.ref_policy, } @@ -149,26 +163,23 @@ class OpenShiftImportImage(AnsibleOpenshiftModule): scheduled = scheduled or old_tag["importPolicy"].get("scheduled") images = isi["spec"].get("images", []) - images.append({ - "from": { - "kind": "DockerImage", - "name": tags.get(k), - }, - "to": { - "name": k - }, - "importPolicy": { - "insecure": insecure, - "scheduled": scheduled - }, - "referencePolicy": self.ref_policy, - }) + images.append( + { + "from": { + "kind": "DockerImage", + "name": tags.get(k), + }, + "to": {"name": k}, + "importPolicy": {"insecure": insecure, "scheduled": scheduled}, + "referencePolicy": self.ref_policy, + } + ) isi["spec"]["images"] = images return isi def create_image_stream(self, ref): """ - Create new ImageStream and accompanying ImageStreamImport + Create new ImageStream and accompanying ImageStreamImport """ source = self.params.get("source") if not source: @@ -183,27 +194,20 @@ class OpenShiftImportImage(AnsibleOpenshiftModule): ), ) if self.params.get("all") and not ref["tag"]: - spec = dict( - dockerImageRepository=source - ) + spec = dict(dockerImageRepository=source) isi = self.create_image_stream_import_all(stream, 
source) else: spec = dict( tags=[ { - "from": { - "kind": "DockerImage", - "name": source - }, - "referencePolicy": self.ref_policy + "from": {"kind": "DockerImage", "name": source}, + "referencePolicy": self.ref_policy, } ] ) tags = {ref["tag"]: source} isi = self.create_image_stream_import_tags(stream, tags) - stream.update( - dict(spec=spec) - ) + stream.update(dict(spec=spec)) return stream, isi def import_all(self, istream): @@ -220,8 +224,9 @@ class OpenShiftImportImage(AnsibleOpenshiftModule): if t.get("from") and t["from"].get("kind") == "DockerImage": tags[t.get("name")] = t["from"].get("name") if tags == {}: - msg = "image stream %s/%s does not have tags pointing to external container images" % ( - stream["metadata"]["namespace"], stream["metadata"]["name"] + msg = ( + "image stream %s/%s does not have tags pointing to external container images" + % (stream["metadata"]["namespace"], stream["metadata"]["name"]) ) self.fail_json(msg=msg) isi = self.create_image_stream_import_tags(stream, tags) @@ -236,7 +241,9 @@ class OpenShiftImportImage(AnsibleOpenshiftModule): source = self.params.get("source") # Follow any referential tags to the destination - final_tag, existing, multiple, err = follow_imagestream_tag_reference(stream, tag) + final_tag, existing, multiple, err = follow_imagestream_tag_reference( + stream, tag + ) if err: if err == err_stream_not_found_ref: # Create a new tag @@ -245,7 +252,10 @@ class OpenShiftImportImage(AnsibleOpenshiftModule): # if the from is still empty this means there's no such tag defined # nor we can't create any from .spec.dockerImageRepository if not source: - msg = "the tag %s does not exist on the image stream - choose an existing tag to import" % tag + msg = ( + "the tag %s does not exist on the image stream - choose an existing tag to import" + % tag + ) self.fail_json(msg=msg) existing = { "from": { @@ -257,13 +267,21 @@ class OpenShiftImportImage(AnsibleOpenshiftModule): self.fail_json(msg=err) else: # Disallow re-importing anything other than DockerImage - if existing.get("from", {}) and existing["from"].get("kind") != "DockerImage": + if ( + existing.get("from", {}) + and existing["from"].get("kind") != "DockerImage" + ): msg = "tag {tag} points to existing {kind}/={name}, it cannot be re-imported.".format( - tag=tag, kind=existing["from"]["kind"], name=existing["from"]["name"] + tag=tag, + kind=existing["from"]["kind"], + name=existing["from"]["name"], ) # disallow changing an existing tag if not existing.get("from", {}): - msg = "tag %s already exists - you cannot change the source using this module." % tag + msg = ( + "tag %s already exists - you cannot change the source using this module." + % tag + ) self.fail_json(msg=msg) if source and source != existing["from"]["name"]: if multiple: @@ -271,7 +289,10 @@ class OpenShiftImportImage(AnsibleOpenshiftModule): tag, final_tag, existing["from"]["name"] ) else: - msg = "the tag %s points to %s you cannot change the source using this module." % (tag, final_tag) + msg = ( + "the tag %s points to %s you cannot change the source using this module." 
+ % (tag, final_tag) + ) self.fail_json(msg=msg) # Set the target item to import @@ -309,13 +330,13 @@ class OpenShiftImportImage(AnsibleOpenshiftModule): kind=kind, api_version=api_version, name=ref.get("name"), - namespace=self.params.get("namespace") + namespace=self.params.get("namespace"), ) result = self.kubernetes_facts(**params) if not result["api_found"]: msg = 'Failed to find API for resource with apiVersion "{0}" and kind "{1}"'.format( api_version, kind - ), + ) self.fail_json(msg=msg) imagestream = None if len(result["resources"]) > 0: @@ -335,7 +356,9 @@ class OpenShiftImportImage(AnsibleOpenshiftModule): def parse_image_reference(self, image_ref): result, err = parse_docker_image_ref(image_ref, self.module) if result.get("digest"): - self.fail_json(msg="Cannot import by ID, error with definition: %s" % image_ref) + self.fail_json( + msg="Cannot import by ID, error with definition: %s" % image_ref + ) tag = result.get("tag") or None if not self.params.get("all") and not tag: tag = "latest" @@ -345,7 +368,6 @@ class OpenShiftImportImage(AnsibleOpenshiftModule): return dict(name=result.get("name"), tag=tag, source=image_ref) def execute_module(self): - names = [] name = self.params.get("name") if isinstance(name, string_types): diff --git a/ansible_collections/community/okd/plugins/module_utils/openshift_ldap.py b/ansible_collections/community/okd/plugins/module_utils/openshift_ldap.py index bb9229a72..cb1977489 100644 --- a/ansible_collections/community/okd/plugins/module_utils/openshift_ldap.py +++ b/ansible_collections/community/okd/plugins/module_utils/openshift_ldap.py @@ -3,7 +3,8 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type @@ -24,109 +25,119 @@ LDAP_SEARCH_OUT_OF_SCOPE_ERROR = "trying to search by DN for an entry that exist def validate_ldap_sync_config(config): # Validate url - url = config.get('url') + url = config.get("url") if not url: return "url should be non empty attribute." # Make sure bindDN and bindPassword are both set, or both unset - bind_dn = config.get('bindDN', "") - bind_password = config.get('bindPassword', "") + bind_dn = config.get("bindDN", "") + bind_password = config.get("bindPassword", "") if (len(bind_dn) == 0) != (len(bind_password) == 0): return "bindDN and bindPassword must both be specified, or both be empty." - insecure = boolean(config.get('insecure')) - ca_file = config.get('ca') + insecure = boolean(config.get("insecure")) + ca_file = config.get("ca") if insecure: - if url.startswith('ldaps://'): + if url.startswith("ldaps://"): return "Cannot use ldaps scheme with insecure=true." if ca_file: return "Cannot specify a ca with insecure=true." 
elif ca_file and not os.path.isfile(ca_file): return "could not read ca file: {0}.".format(ca_file) - nameMapping = config.get('groupUIDNameMapping', {}) + nameMapping = config.get("groupUIDNameMapping", {}) for k, v in iteritems(nameMapping): if len(k) == 0 or len(v) == 0: return "groupUIDNameMapping has empty key or value" schemas = [] - schema_list = ('rfc2307', 'activeDirectory', 'augmentedActiveDirectory') + schema_list = ("rfc2307", "activeDirectory", "augmentedActiveDirectory") for schema in schema_list: if schema in config: schemas.append(schema) if len(schemas) == 0: - return "No schema-specific config was provided, should be one of %s" % ", ".join(schema_list) + return ( + "No schema-specific config was provided, should be one of %s" + % ", ".join(schema_list) + ) if len(schemas) > 1: - return "Exactly one schema-specific config is required; found (%d) %s" % (len(schemas), ','.join(schemas)) + return "Exactly one schema-specific config is required; found (%d) %s" % ( + len(schemas), + ",".join(schemas), + ) - if schemas[0] == 'rfc2307': + if schemas[0] == "rfc2307": return validate_RFC2307(config.get("rfc2307")) - elif schemas[0] == 'activeDirectory': + elif schemas[0] == "activeDirectory": return validate_ActiveDirectory(config.get("activeDirectory")) - elif schemas[0] == 'augmentedActiveDirectory': + elif schemas[0] == "augmentedActiveDirectory": return validate_AugmentedActiveDirectory(config.get("augmentedActiveDirectory")) def validate_ldap_query(qry, isDNOnly=False): - # validate query scope - scope = qry.get('scope') + scope = qry.get("scope") if scope and scope not in ("", "sub", "one", "base"): return "invalid scope %s" % scope # validate deref aliases - derefAlias = qry.get('derefAliases') + derefAlias = qry.get("derefAliases") if derefAlias and derefAlias not in ("never", "search", "base", "always"): return "not a valid LDAP alias dereferencing behavior: %s" % derefAlias # validate timeout - timeout = qry.get('timeout') + timeout = qry.get("timeout") if timeout and float(timeout) < 0: return "timeout must be equal to or greater than zero" # Validate DN only - qry_filter = qry.get('filter', "") + qry_filter = qry.get("filter", "") if isDNOnly: if len(qry_filter) > 0: return 'cannot specify a filter when using "dn" as the UID attribute' else: # validate filter - if len(qry_filter) == 0 or qry_filter[0] != '(': + if len(qry_filter) == 0 or qry_filter[0] != "(": return "filter does not start with an '('" return None def validate_RFC2307(config): - qry = config.get('groupsQuery') + qry = config.get("groupsQuery") if not qry or not isinstance(qry, dict): return "RFC2307: groupsQuery requires a dictionary" error = validate_ldap_query(qry) if error: return error - for field in ('groupUIDAttribute', 'groupNameAttributes', 'groupMembershipAttributes', - 'userUIDAttribute', 'userNameAttributes'): + for field in ( + "groupUIDAttribute", + "groupNameAttributes", + "groupMembershipAttributes", + "userUIDAttribute", + "userNameAttributes", + ): value = config.get(field) if not value: return "RFC2307: {0} is required.".format(field) - users_qry = config.get('usersQuery') + users_qry = config.get("usersQuery") if not users_qry or not isinstance(users_qry, dict): return "RFC2307: usersQuery requires a dictionary" - isUserDNOnly = (config.get('userUIDAttribute').strip() == 'dn') + isUserDNOnly = config.get("userUIDAttribute").strip() == "dn" return validate_ldap_query(users_qry, isDNOnly=isUserDNOnly) def validate_ActiveDirectory(config, label="ActiveDirectory"): - users_qry = 
config.get('usersQuery') + users_qry = config.get("usersQuery") if not users_qry or not isinstance(users_qry, dict): return "{0}: usersQuery requires a dictionary".format(label) error = validate_ldap_query(users_qry) if error: return error - for field in ('userNameAttributes', 'groupMembershipAttributes'): + for field in ("userNameAttributes", "groupMembershipAttributes"): value = config.get(field) if not value: return "{0}: {1} is required.".format(label, field) @@ -138,24 +149,24 @@ def validate_AugmentedActiveDirectory(config): error = validate_ActiveDirectory(config, label="AugmentedActiveDirectory") if error: return error - for field in ('groupUIDAttribute', 'groupNameAttributes'): + for field in ("groupUIDAttribute", "groupNameAttributes"): value = config.get(field) if not value: return "AugmentedActiveDirectory: {0} is required".format(field) - groups_qry = config.get('groupsQuery') + groups_qry = config.get("groupsQuery") if not groups_qry or not isinstance(groups_qry, dict): return "AugmentedActiveDirectory: groupsQuery requires a dictionary." - isGroupDNOnly = (config.get('groupUIDAttribute').strip() == 'dn') + isGroupDNOnly = config.get("groupUIDAttribute").strip() == "dn" return validate_ldap_query(groups_qry, isDNOnly=isGroupDNOnly) def determine_ldap_scope(scope): if scope in ("", "sub"): return ldap.SCOPE_SUBTREE - elif scope == 'base': + elif scope == "base": return ldap.SCOPE_BASE - elif scope == 'one': + elif scope == "one": return ldap.SCOPE_ONELEVEL return None @@ -175,28 +186,28 @@ def determine_deref_aliases(derefAlias): def openshift_ldap_build_base_query(config): qry = {} - if config.get('baseDN'): - qry['base'] = config.get('baseDN') + if config.get("baseDN"): + qry["base"] = config.get("baseDN") - scope = determine_ldap_scope(config.get('scope')) + scope = determine_ldap_scope(config.get("scope")) if scope: - qry['scope'] = scope + qry["scope"] = scope - pageSize = config.get('pageSize') + pageSize = config.get("pageSize") if pageSize and int(pageSize) > 0: - qry['sizelimit'] = int(pageSize) + qry["sizelimit"] = int(pageSize) - timeout = config.get('timeout') + timeout = config.get("timeout") if timeout and int(timeout) > 0: - qry['timeout'] = int(timeout) + qry["timeout"] = int(timeout) - filter = config.get('filter') + filter = config.get("filter") if filter: - qry['filterstr'] = filter + qry["filterstr"] = filter - derefAlias = determine_deref_aliases(config.get('derefAliases')) + derefAlias = determine_deref_aliases(config.get("derefAliases")) if derefAlias: - qry['derefAlias'] = derefAlias + qry["derefAlias"] = derefAlias return qry @@ -205,32 +216,30 @@ def openshift_ldap_get_attribute_for_entry(entry, attribute): if isinstance(attribute, list): attributes = attribute for k in attributes: - if k.lower() == 'dn': + if k.lower() == "dn": return entry[0] v = entry[1].get(k, None) if v: if isinstance(v, list): result = [] for x in v: - if hasattr(x, 'decode'): - result.append(x.decode('utf-8')) + if hasattr(x, "decode"): + result.append(x.decode("utf-8")) else: result.append(x) return result else: - return v.decode('utf-8') if hasattr(v, 'decode') else v + return v.decode("utf-8") if hasattr(v, "decode") else v return "" def ldap_split_host_port(hostport): """ - ldap_split_host_port splits a network address of the form "host:port", - "host%zone:port", "[host]:port" or "[host%zone]:port" into host or - host%zone and port.
+ ldap_split_host_port splits a network address of the form "host:port", + "host%zone:port", "[host]:port" or "[host%zone]:port" into host or + host%zone and port. """ - result = dict( - scheme=None, netlocation=None, host=None, port=None - ) + result = dict(scheme=None, netlocation=None, host=None, port=None) if not hostport: return result, None @@ -240,10 +249,10 @@ def ldap_split_host_port(hostport): if "://" in hostport: idx = hostport.find(scheme_l) result["scheme"] = hostport[:idx] - netlocation = hostport[idx + len(scheme_l):] + netlocation = hostport[idx + len(scheme_l):] # fmt: skip result["netlocation"] = netlocation - if netlocation[-1] == ']': + if netlocation[-1] == "]": # ipv6 literal (with no port) result["host"] = netlocation @@ -259,21 +268,32 @@ def ldap_split_host_port(hostport): def openshift_ldap_query_for_entries(connection, qry, unique_entry=True): # set deref alias (TODO: need to set a default value to reset for each transaction) - derefAlias = qry.pop('derefAlias', None) + derefAlias = qry.pop("derefAlias", None) if derefAlias: ldap.set_option(ldap.OPT_DEREF, derefAlias) try: result = connection.search_ext_s(**qry) if not result or len(result) == 0: - return None, "Entry not found for base='{0}' and filter='{1}'".format(qry['base'], qry['filterstr']) + return None, "Entry not found for base='{0}' and filter='{1}'".format( + qry["base"], qry["filterstr"] + ) if len(result) > 1 and unique_entry: - if qry.get('scope') == ldap.SCOPE_BASE: - return None, "multiple entries found matching dn={0}: {1}".format(qry['base'], result) + if qry.get("scope") == ldap.SCOPE_BASE: + return None, "multiple entries found matching dn={0}: {1}".format( + qry["base"], result + ) else: - return None, "multiple entries found matching filter {0}: {1}".format(qry['filterstr'], result) + return None, "multiple entries found matching filter {0}: {1}".format( + qry["filterstr"], result + ) return result, None except ldap.NO_SUCH_OBJECT: - return None, "search for entry with base dn='{0}' refers to a non-existent entry".format(qry['base']) + return ( + None, + "search for entry with base dn='{0}' refers to a non-existent entry".format( + qry["base"] + ), + ) def openshift_equal_dn_objects(dn_obj, other_dn_obj): @@ -303,7 +323,9 @@ def openshift_ancestorof_dn(dn, other): if len(dn_obj) >= len(other_dn_obj): return False # Take the last attribute from the other DN to compare against - return openshift_equal_dn_objects(dn_obj, other_dn_obj[len(other_dn_obj) - len(dn_obj):]) + return openshift_equal_dn_objects( + dn_obj, other_dn_obj[len(other_dn_obj) - len(dn_obj):] # fmt: skip + ) class OpenshiftLDAPQueryOnAttribute(object): @@ -324,33 +346,38 @@ class OpenshiftLDAPQueryOnAttribute(object): output = [] hex_string = "0123456789abcdef" for c in buffer: - if ord(c) > 0x7f or c in ('(', ')', '\\', '*') or c == 0: + if ord(c) > 0x7F or c in ("(", ")", "\\", "*") or c == 0: first = ord(c) >> 4 - second = ord(c) & 0xf - output += ['\\', hex_string[first], hex_string[second]] + second = ord(c) & 0xF + output += ["\\", hex_string[first], hex_string[second]] else: output.append(c) - return ''.join(output) + return "".join(output) def build_request(self, ldapuid, attributes): params = copy.deepcopy(self.qry) - if self.query_attribute.lower() == 'dn': + if self.query_attribute.lower() == "dn": if ldapuid: - if not openshift_equal_dn(ldapuid, params['base']) and not openshift_ancestorof_dn(params['base'], ldapuid): + if not openshift_equal_dn( + ldapuid, params["base"] + ) and not 
openshift_ancestorof_dn(params["base"], ldapuid): return None, LDAP_SEARCH_OUT_OF_SCOPE_ERROR - params['base'] = ldapuid - params['scope'] = ldap.SCOPE_BASE + params["base"] = ldapuid + params["scope"] = ldap.SCOPE_BASE # filter that returns all values - params['filterstr'] = "(objectClass=*)" - params['attrlist'] = attributes + params["filterstr"] = "(objectClass=*)" + params["attrlist"] = attributes else: # Builds the query containing a filter that conjoins the common filter given # in the configuration with the specific attribute filter for which the attribute value is given - specificFilter = "%s=%s" % (self.escape_filter(self.query_attribute), self.escape_filter(ldapuid)) - qry_filter = params.get('filterstr', None) + specificFilter = "%s=%s" % ( + self.escape_filter(self.query_attribute), + self.escape_filter(ldapuid), + ) + qry_filter = params.get("filterstr", None) if qry_filter: - params['filterstr'] = "(&%s(%s))" % (qry_filter, specificFilter) - params['attrlist'] = attributes + params["filterstr"] = "(&%s(%s))" % (qry_filter, specificFilter) + params["attrlist"] = attributes return params, None def ldap_search(self, connection, ldapuid, required_attributes, unique_entry=True): @@ -358,21 +385,29 @@ class OpenshiftLDAPQueryOnAttribute(object): if error: return None, error # set deref alias (TODO: need to set a default value to reset for each transaction) - derefAlias = query.pop('derefAlias', None) + derefAlias = query.pop("derefAlias", None) if derefAlias: ldap.set_option(ldap.OPT_DEREF, derefAlias) try: result = connection.search_ext_s(**query) if not result or len(result) == 0: - return None, "Entry not found for base='{0}' and filter='{1}'".format(query['base'], query['filterstr']) + return None, "Entry not found for base='{0}' and filter='{1}'".format( + query["base"], query["filterstr"] + ) if unique_entry: if len(result) > 1: - return None, "Multiple Entries found matching search criteria: %s (%s)" % (query, result) + return ( + None, + "Multiple Entries found matching search criteria: %s (%s)" + % (query, result), + ) result = result[0] return result, None except ldap.NO_SUCH_OBJECT: - return None, "Entry not found for base='{0}' and filter='{1}'".format(query['base'], query['filterstr']) + return None, "Entry not found for base='{0}' and filter='{1}'".format( + query["base"], query["filterstr"] + ) except Exception as err: return None, "Request %s failed due to: %s" % (query, err) @@ -384,30 +419,43 @@ class OpenshiftLDAPQuery(object): def build_request(self, attributes): params = copy.deepcopy(self.qry) - params['attrlist'] = attributes + params["attrlist"] = attributes return params def ldap_search(self, connection, required_attributes): query = self.build_request(required_attributes) # set deref alias (TODO: need to set a default value to reset for each transaction) - derefAlias = query.pop('derefAlias', None) + derefAlias = query.pop("derefAlias", None) if derefAlias: ldap.set_option(ldap.OPT_DEREF, derefAlias) try: result = connection.search_ext_s(**query) if not result or len(result) == 0: - return None, "Entry not found for base='{0}' and filter='{1}'".format(query['base'], query['filterstr']) + return None, "Entry not found for base='{0}' and filter='{1}'".format( + query["base"], query["filterstr"] + ) return result, None except ldap.NO_SUCH_OBJECT: - return None, "search for entry with base dn='{0}' refers to a non-existent entry".format(query['base']) + return ( + None, + "search for entry with base dn='{0}' refers to a non-existent entry".format( + 
query["base"] + ), + ) class OpenshiftLDAPInterface(object): - - def __init__(self, connection, groupQuery, groupNameAttributes, groupMembershipAttributes, - userQuery, userNameAttributes, config): - + def __init__( + self, + connection, + groupQuery, + groupNameAttributes, + groupMembershipAttributes, + userQuery, + userNameAttributes, + config, + ): self.connection = connection self.groupQuery = copy.deepcopy(groupQuery) self.groupNameAttributes = groupNameAttributes @@ -416,8 +464,12 @@ class OpenshiftLDAPInterface(object): self.userNameAttributes = userNameAttributes self.config = config - self.tolerate_not_found = boolean(config.get('tolerateMemberNotFoundErrors', False)) - self.tolerate_out_of_scope = boolean(config.get('tolerateMemberOutOfScopeErrors', False)) + self.tolerate_not_found = boolean( + config.get("tolerateMemberNotFoundErrors", False) + ) + self.tolerate_out_of_scope = boolean( + config.get("tolerateMemberOutOfScopeErrors", False) + ) self.required_group_attributes = [self.groupQuery.query_attribute] for x in self.groupNameAttributes + self.groupMembershipAttributes: @@ -434,13 +486,15 @@ class OpenshiftLDAPInterface(object): def get_group_entry(self, uid): """ - get_group_entry returns an LDAP group entry for the given group UID by searching the internal cache - of the LDAPInterface first, then sending an LDAP query if the cache did not contain the entry. + get_group_entry returns an LDAP group entry for the given group UID by searching the internal cache + of the LDAPInterface first, then sending an LDAP query if the cache did not contain the entry. """ if uid in self.cached_groups: return self.cached_groups.get(uid), None - group, err = self.groupQuery.ldap_search(self.connection, uid, self.required_group_attributes) + group, err = self.groupQuery.ldap_search( + self.connection, uid, self.required_group_attributes + ) if err: return None, err self.cached_groups[uid] = group @@ -448,13 +502,15 @@ class OpenshiftLDAPInterface(object): def get_user_entry(self, uid): """ - get_user_entry returns an LDAP group entry for the given user UID by searching the internal cache - of the LDAPInterface first, then sending an LDAP query if the cache did not contain the entry. + get_user_entry returns an LDAP user entry for the given user UID by searching the internal cache + of the LDAPInterface first, then sending an LDAP query if the cache did not contain the entry.
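A hypothetical lookup for illustration (the interface object and the DN are assumed, not part of this module; python-ldap returns each entry as a (dn, attributes) tuple):

    entry, err = interface.get_user_entry("uid=jdoe,ou=users,dc=example,dc=org")
    if err is None:
        dn, attrs = entry  # attrs maps attribute names to lists of values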
""" if uid in self.cached_users: return self.cached_users.get(uid), None - entry, err = self.userQuery.ldap_search(self.connection, uid, self.required_user_attributes) + entry, err = self.userQuery.ldap_search( + self.connection, uid, self.required_user_attributes + ) if err: return None, err self.cached_users[uid] = entry @@ -466,19 +522,19 @@ class OpenshiftLDAPInterface(object): def list_groups(self): group_qry = copy.deepcopy(self.groupQuery.qry) - group_qry['attrlist'] = self.required_group_attributes + group_qry["attrlist"] = self.required_group_attributes groups, err = openshift_ldap_query_for_entries( - connection=self.connection, - qry=group_qry, - unique_entry=False + connection=self.connection, qry=group_qry, unique_entry=False ) if err: return None, err group_uids = [] for entry in groups: - uid = openshift_ldap_get_attribute_for_entry(entry, self.groupQuery.query_attribute) + uid = openshift_ldap_get_attribute_for_entry( + entry, self.groupQuery.query_attribute + ) if not uid: return None, "Unable to find LDAP group uid for entry %s" % entry self.cached_groups[uid] = entry @@ -487,7 +543,7 @@ class OpenshiftLDAPInterface(object): def extract_members(self, uid): """ - returns the LDAP member entries for a group specified with a ldapGroupUID + returns the LDAP member entries for a group specified with a ldapGroupUID """ # Get group entry from LDAP group, err = self.get_group_entry(uid) @@ -514,39 +570,46 @@ class OpenshiftLDAPInterface(object): class OpenshiftLDAPRFC2307(object): - def __init__(self, config, ldap_connection): - self.config = config self.ldap_interface = self.create_ldap_interface(ldap_connection) def create_ldap_interface(self, connection): segment = self.config.get("rfc2307") - groups_base_qry = openshift_ldap_build_base_query(segment['groupsQuery']) - users_base_qry = openshift_ldap_build_base_query(segment['usersQuery']) + groups_base_qry = openshift_ldap_build_base_query(segment["groupsQuery"]) + users_base_qry = openshift_ldap_build_base_query(segment["usersQuery"]) - groups_query = OpenshiftLDAPQueryOnAttribute(groups_base_qry, segment['groupUIDAttribute']) - users_query = OpenshiftLDAPQueryOnAttribute(users_base_qry, segment['userUIDAttribute']) + groups_query = OpenshiftLDAPQueryOnAttribute( + groups_base_qry, segment["groupUIDAttribute"] + ) + users_query = OpenshiftLDAPQueryOnAttribute( + users_base_qry, segment["userUIDAttribute"] + ) params = dict( connection=connection, groupQuery=groups_query, - groupNameAttributes=segment['groupNameAttributes'], - groupMembershipAttributes=segment['groupMembershipAttributes'], + groupNameAttributes=segment["groupNameAttributes"], + groupMembershipAttributes=segment["groupMembershipAttributes"], userQuery=users_query, - userNameAttributes=segment['userNameAttributes'], - config=segment + userNameAttributes=segment["userNameAttributes"], + config=segment, ) return OpenshiftLDAPInterface(**params) def get_username_for_entry(self, entry): - username = openshift_ldap_get_attribute_for_entry(entry, self.ldap_interface.userNameAttributes) + username = openshift_ldap_get_attribute_for_entry( + entry, self.ldap_interface.userNameAttributes + ) if not username: - return None, "The user entry (%s) does not map to a OpenShift User name with the given mapping" % entry + return ( + None, + "The user entry (%s) does not map to a OpenShift User name with the given mapping" + % entry, + ) return username, None def get_group_name_for_uid(self, uid): - # Get name from User defined mapping groupuid_name_mapping = 
self.config.get("groupUIDNameMapping") if groupuid_name_mapping and uid in groupuid_name_mapping: @@ -555,10 +618,13 @@ class OpenshiftLDAPRFC2307(object): group, err = self.ldap_interface.get_group_entry(uid) if err: return None, err - group_name = openshift_ldap_get_attribute_for_entry(group, self.ldap_interface.groupNameAttributes) + group_name = openshift_ldap_get_attribute_for_entry( + group, self.ldap_interface.groupNameAttributes + ) if not group_name: - error = "The group entry (%s) does not map to an OpenShift Group name with the given name attribute (%s)" % ( - group, self.ldap_interface.groupNameAttributes + error = ( + "The group entry (%s) does not map to an OpenShift Group name with the given name attribute (%s)" + % (group, self.ldap_interface.groupNameAttributes) ) return None, error if isinstance(group_name, list): @@ -570,7 +636,11 @@ class OpenshiftLDAPRFC2307(object): def is_ldapgroup_exists(self, uid): group, err = self.ldap_interface.get_group_entry(uid) if err: - if err == LDAP_SEARCH_OUT_OF_SCOPE_ERROR or err.startswith("Entry not found") or "non-existent entry" in err: + if ( + err == LDAP_SEARCH_OUT_OF_SCOPE_ERROR + or err.startswith("Entry not found") + or "non-existent entry" in err + ): return False, None return False, err if group: @@ -585,7 +655,6 @@ class OpenshiftLDAPRFC2307(object): class OpenshiftLDAP_ADInterface(object): - def __init__(self, connection, user_query, group_member_attr, user_name_attr): self.connection = connection self.userQuery = user_query @@ -609,7 +678,9 @@ class OpenshiftLDAP_ADInterface(object): def populate_cache(self): if not self.cache_populated: self.cache_populated = True - entries, err = self.userQuery.ldap_search(self.connection, self.required_user_attributes) + entries, err = self.userQuery.ldap_search( + self.connection, self.required_user_attributes + ) if err: return err @@ -645,7 +716,9 @@ class OpenshiftLDAP_ADInterface(object): users_in_group = [] for attr in self.groupMembershipAttributes: query_on_attribute = OpenshiftLDAPQueryOnAttribute(self.userQuery.qry, attr) - entries, error = query_on_attribute.ldap_search(self.connection, uid, self.required_user_attributes, unique_entry=False) + entries, error = query_on_attribute.ldap_search( + self.connection, uid, self.required_user_attributes, unique_entry=False + ) if error and "not found" not in error: return None, error if not entries: @@ -660,15 +733,13 @@ class OpenshiftLDAP_ADInterface(object): class OpenshiftLDAPActiveDirectory(object): - def __init__(self, config, ldap_connection): - self.config = config self.ldap_interface = self.create_ldap_interface(ldap_connection) def create_ldap_interface(self, connection): segment = self.config.get("activeDirectory") - base_query = openshift_ldap_build_base_query(segment['usersQuery']) + base_query = openshift_ldap_build_base_query(segment["usersQuery"]) user_query = OpenshiftLDAPQuery(base_query) return OpenshiftLDAP_ADInterface( @@ -679,9 +750,15 @@ class OpenshiftLDAPActiveDirectory(object): ) def get_username_for_entry(self, entry): - username = openshift_ldap_get_attribute_for_entry(entry, self.ldap_interface.userNameAttributes) + username = openshift_ldap_get_attribute_for_entry( + entry, self.ldap_interface.userNameAttributes + ) if not username: - return None, "The user entry (%s) does not map to a OpenShift User name with the given mapping" % entry + return ( + None, + "The user entry (%s) does not map to an OpenShift User name with the given mapping" + % entry, + ) return username, None def
get_group_name_for_uid(self, uid): @@ -702,8 +779,15 @@ class OpenshiftLDAPActiveDirectory(object): class OpenshiftLDAP_AugmentedADInterface(OpenshiftLDAP_ADInterface): - - def __init__(self, connection, user_query, group_member_attr, user_name_attr, group_qry, group_name_attr): + def __init__( + self, + connection, + user_query, + group_member_attr, + user_name_attr, + group_qry, + group_name_attr, + ): super(OpenshiftLDAP_AugmentedADInterface, self).__init__( connection, user_query, group_member_attr, user_name_attr ) @@ -719,13 +803,15 @@ class OpenshiftLDAP_AugmentedADInterface(OpenshiftLDAP_ADInterface): def get_group_entry(self, uid): """ - get_group_entry returns an LDAP group entry for the given group UID by searching the internal cache - of the LDAPInterface first, then sending an LDAP query if the cache did not contain the entry. + get_group_entry returns an LDAP group entry for the given group UID by searching the internal cache + of the LDAPInterface first, then sending an LDAP query if the cache did not contain the entry. """ if uid in self.cached_groups: return self.cached_groups.get(uid), None - group, err = self.groupQuery.ldap_search(self.connection, uid, self.required_group_attributes) + group, err = self.groupQuery.ldap_search( + self.connection, uid, self.required_group_attributes + ) if err: return None, err self.cached_groups[uid] = group @@ -750,19 +836,19 @@ class OpenshiftLDAP_AugmentedADInterface(OpenshiftLDAP_ADInterface): class OpenshiftLDAPAugmentedActiveDirectory(OpenshiftLDAPRFC2307): - def __init__(self, config, ldap_connection): - self.config = config self.ldap_interface = self.create_ldap_interface(ldap_connection) def create_ldap_interface(self, connection): segment = self.config.get("augmentedActiveDirectory") - user_base_query = openshift_ldap_build_base_query(segment['usersQuery']) - groups_base_qry = openshift_ldap_build_base_query(segment['groupsQuery']) + user_base_query = openshift_ldap_build_base_query(segment["usersQuery"]) + groups_base_qry = openshift_ldap_build_base_query(segment["groupsQuery"]) user_query = OpenshiftLDAPQuery(user_base_query) - groups_query = OpenshiftLDAPQueryOnAttribute(groups_base_qry, segment['groupUIDAttribute']) + groups_query = OpenshiftLDAPQueryOnAttribute( + groups_base_qry, segment["groupUIDAttribute"] + ) return OpenshiftLDAP_AugmentedADInterface( connection=connection, @@ -770,7 +856,7 @@ class OpenshiftLDAPAugmentedActiveDirectory(OpenshiftLDAPRFC2307): group_member_attr=segment["groupMembershipAttributes"], user_name_attr=segment["userNameAttributes"], group_qry=groups_query, - group_name_attr=segment["groupNameAttributes"] + group_name_attr=segment["groupNameAttributes"], ) def is_ldapgroup_exists(self, uid): diff --git a/ansible_collections/community/okd/plugins/module_utils/openshift_process.py b/ansible_collections/community/okd/plugins/module_utils/openshift_process.py index 6fa69d13c..3996aedc2 100644 --- a/ansible_collections/community/okd/plugins/module_utils/openshift_process.py +++ b/ansible_collections/community/okd/plugins/module_utils/openshift_process.py @@ -1,15 +1,16 @@ #!/usr/bin/env python -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type import os -import traceback from ansible.module_utils._text import to_native - -from ansible_collections.community.okd.plugins.module_utils.openshift_common import AnsibleOpenshiftModule +from 
ansible_collections.community.okd.plugins.module_utils.openshift_common import ( + AnsibleOpenshiftModule, +) try: from kubernetes.dynamic.exceptions import DynamicApiError @@ -124,7 +125,6 @@ class OpenShiftProcess(AnsibleOpenshiftModule): self.exit_json(**result) def create_resources(self, definitions): - params = {"namespace": self.params.get("namespace_target")} self.params["apply"] = False @@ -139,9 +139,7 @@ class OpenShiftProcess(AnsibleOpenshiftModule): continue kind = definition.get("kind") if kind and kind.endswith("List"): - flattened_definitions.extend( - self.flatten_list_kind(definition, params) - ) + flattened_definitions.extend(self.flatten_list_kind(definition, params)) else: flattened_definitions.append(self.merge_params(definition, params)) diff --git a/ansible_collections/community/okd/plugins/module_utils/openshift_registry.py b/ansible_collections/community/okd/plugins/module_utils/openshift_registry.py index 32a1830df..557a7d1f5 100644 --- a/ansible_collections/community/okd/plugins/module_utils/openshift_registry.py +++ b/ansible_collections/community/okd/plugins/module_utils/openshift_registry.py @@ -1,12 +1,15 @@ #!/usr/bin/env python -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type import traceback from urllib.parse import urlparse -from ansible_collections.community.okd.plugins.module_utils.openshift_common import AnsibleOpenshiftModule +from ansible_collections.community.okd.plugins.module_utils.openshift_common import ( + AnsibleOpenshiftModule, +) from ansible_collections.community.okd.plugins.module_utils.openshift_docker_image import ( parse_docker_image_ref, @@ -15,6 +18,7 @@ from ansible_collections.community.okd.plugins.module_utils.openshift_docker_ima try: from requests import request from requests.auth import HTTPBasicAuth + HAS_REQUESTS_MODULE = True requests_import_exception = None except ImportError as e: @@ -32,11 +36,7 @@ class OpenShiftRegistry(AnsibleOpenshiftModule): kind = "ImageStream" api_version = "image.openshift.io/v1" - params = dict( - kind=kind, - api_version=api_version, - namespace=namespace - ) + params = dict(kind=kind, api_version=api_version, namespace=namespace) result = self.kubernetes_facts(**params) imagestream = [] if len(result["resources"]) > 0: @@ -44,7 +44,6 @@ class OpenShiftRegistry(AnsibleOpenshiftModule): return imagestream def find_registry_info(self): - def _determine_registry(image_stream): public, internal = None, None docker_repo = image_stream["status"].get("publicDockerImageRepository") @@ -72,39 +71,46 @@ class OpenShiftRegistry(AnsibleOpenshiftModule): self.fail_json(msg="The integrated registry has not been configured") return internal, public - self.fail_json(msg="No Image Streams could be located to retrieve registry info.") + self.fail_json( + msg="No Image Streams could be located to retrieve registry info." + ) def execute_module(self): result = {} - result["internal_hostname"], result["public_hostname"] = self.find_registry_info() + ( + result["internal_hostname"], + result["public_hostname"], + ) = self.find_registry_info() if self.check: public_registry = result["public_hostname"] if not public_registry: result["check"] = dict( - reached=False, - msg="Registry does not have a public hostname." + reached=False, msg="Registry does not have a public hostname." 
) else: - headers = { - 'Content-Type': 'application/json' - } - params = { - 'method': 'GET', - 'verify': False - } + headers = {"Content-Type": "application/json"} + params = {"method": "GET", "verify": False} if self.client.configuration.api_key: headers.update(self.client.configuration.api_key) - elif self.client.configuration.username and self.client.configuration.password: + elif ( + self.client.configuration.username + and self.client.configuration.password + ): if not HAS_REQUESTS_MODULE: result["check"] = dict( reached=False, msg="The requests python package is missing, try `pip install requests`", - error=requests_import_exception + error=requests_import_exception, ) self.exit_json(**result) params.update( - dict(auth=HTTPBasicAuth(self.client.configuration.username, self.client.configuration.password)) + dict( + auth=HTTPBasicAuth( + self.client.configuration.username, + self.client.configuration.password, + ) + ) ) # verify ssl @@ -112,23 +118,20 @@ class OpenShiftRegistry(AnsibleOpenshiftModule): if len(host.scheme) == 0: registry_url = "https://" + public_registry - if registry_url.startswith("https://") and self.client.configuration.ssl_ca_cert: - params.update( - dict(verify=self.client.configuration.ssl_ca_cert) - ) - params.update( - dict(headers=headers) - ) + if ( + registry_url.startswith("https://") + and self.client.configuration.ssl_ca_cert + ): + params.update(dict(verify=self.client.configuration.ssl_ca_cert)) + params.update(dict(headers=headers)) last_bad_status, last_bad_reason = None, None for path in ("/", "/healthz"): - params.update( - dict(url=registry_url + path) - ) + params.update(dict(url=registry_url + path)) response = request(**params) if response.status_code == 200: result["check"] = dict( reached=True, - msg="The local client can contact the integrated registry." + msg="The local client can contact the integrated registry.", ) self.exit_json(**result) last_bad_reason = response.reason @@ -136,9 +139,8 @@ class OpenShiftRegistry(AnsibleOpenshiftModule): result["check"] = dict( reached=False, - msg="Unable to contact the integrated registry using local client. Status=%d, Reason=%s" % ( - last_bad_status, last_bad_reason - ) + msg="Unable to contact the integrated registry using local client. 
Status=%d, Reason=%s" + % (last_bad_status, last_bad_reason), ) self.exit_json(**result) diff --git a/ansible_collections/community/okd/plugins/modules/k8s.py b/ansible_collections/community/okd/plugins/modules/k8s.py index c3b8d1b66..ae3a9b493 100644 --- a/ansible_collections/community/okd/plugins/modules/k8s.py +++ b/ansible_collections/community/okd/plugins/modules/k8s.py @@ -10,7 +10,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type # STARTREMOVE (downstream) -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: k8s @@ -142,9 +142,9 @@ requirements: - "python >= 3.6" - "kubernetes >= 12.0.0" - "PyYAML >= 3.11" -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create a k8s namespace community.okd.k8s: name: testing @@ -169,10 +169,10 @@ EXAMPLES = r''' app: galaxy service: web ports: - - protocol: TCP - targetPort: 8000 - name: port-8000-tcp - port: 8000 + - protocol: TCP + targetPort: 8000 + name: port-8000-tcp + port: 8000 - name: Remove an existing Service object community.okd.k8s: @@ -206,18 +206,18 @@ EXAMPLES = r''' state: present definition: "{{ lookup('template', '/testing/deployment.yml') | from_yaml }}" validate: - fail_on_error: yes + fail_on_error: true - name: warn on validation errors, check for unexpected properties community.okd.k8s: state: present definition: "{{ lookup('template', '/testing/deployment.yml') | from_yaml }}" validate: - fail_on_error: no - strict: yes -''' + fail_on_error: false + strict: true +""" -RETURN = r''' +RETURN = r""" result: description: - The created, patched, or otherwise present object. Will be empty in the case of a deletion. @@ -254,22 +254,26 @@ result: type: int sample: 48 error: - description: error while trying to create/delete the object. + description: Error while trying to create/delete the object. 
returned: error type: complex -''' +""" # ENDREMOVE (downstream) from ansible_collections.kubernetes.core.plugins.module_utils.args_common import ( - NAME_ARG_SPEC, RESOURCE_ARG_SPEC, AUTH_ARG_SPEC, WAIT_ARG_SPEC, DELETE_OPTS_ARG_SPEC + NAME_ARG_SPEC, + RESOURCE_ARG_SPEC, + AUTH_ARG_SPEC, + WAIT_ARG_SPEC, + DELETE_OPTS_ARG_SPEC, ) def validate_spec(): return dict( - fail_on_error=dict(type='bool'), + fail_on_error=dict(type="bool"), version=dict(), - strict=dict(type='bool', default=True) + strict=dict(type="bool", default=True), ) @@ -279,30 +283,41 @@ def argspec(): argument_spec.update(RESOURCE_ARG_SPEC) argument_spec.update(AUTH_ARG_SPEC) argument_spec.update(WAIT_ARG_SPEC) - argument_spec['merge_type'] = dict(type='list', elements='str', choices=['json', 'merge', 'strategic-merge']) - argument_spec['validate'] = dict(type='dict', default=None, options=validate_spec()) - argument_spec['append_hash'] = dict(type='bool', default=False) - argument_spec['apply'] = dict(type='bool', default=False) - argument_spec['template'] = dict(type='raw', default=None) - argument_spec['delete_options'] = dict(type='dict', default=None, options=DELETE_OPTS_ARG_SPEC) - argument_spec['continue_on_error'] = dict(type='bool', default=False) - argument_spec['state'] = dict(default='present', choices=['present', 'absent', 'patched']) - argument_spec['force'] = dict(type='bool', default=False) + argument_spec["merge_type"] = dict( + type="list", elements="str", choices=["json", "merge", "strategic-merge"] + ) + argument_spec["validate"] = dict(type="dict", default=None, options=validate_spec()) + argument_spec["append_hash"] = dict(type="bool", default=False) + argument_spec["apply"] = dict(type="bool", default=False) + argument_spec["template"] = dict(type="raw", default=None) + argument_spec["delete_options"] = dict( + type="dict", default=None, options=DELETE_OPTS_ARG_SPEC + ) + argument_spec["continue_on_error"] = dict(type="bool", default=False) + argument_spec["state"] = dict( + default="present", choices=["present", "absent", "patched"] + ) + argument_spec["force"] = dict(type="bool", default=False) return argument_spec def main(): mutually_exclusive = [ - ('resource_definition', 'src'), - ('merge_type', 'apply'), - ('template', 'resource_definition'), - ('template', 'src'), + ("resource_definition", "src"), + ("merge_type", "apply"), + ("template", "resource_definition"), + ("template", "src"), ] from ansible_collections.community.okd.plugins.module_utils.k8s import OKDRawModule - module = OKDRawModule(argument_spec=argspec(), supports_check_mode=True, mutually_exclusive=mutually_exclusive) + + module = OKDRawModule( + argument_spec=argspec(), + supports_check_mode=True, + mutually_exclusive=mutually_exclusive, + ) module.run_module() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/okd/plugins/modules/openshift_adm_groups_sync.py b/ansible_collections/community/okd/plugins/modules/openshift_adm_groups_sync.py index 66b0fbb15..3f5f14be3 100644 --- a/ansible_collections/community/okd/plugins/modules/openshift_adm_groups_sync.py +++ b/ansible_collections/community/okd/plugins/modules/openshift_adm_groups_sync.py @@ -96,31 +96,31 @@ EXAMPLES = r""" - name: Sync all groups from an LDAP server openshift_adm_groups_sync: src: - kind: LDAPSyncConfig - apiVersion: v1 - url: ldap://localhost:1390 - insecure: true - bindDN: cn=admin,dc=example,dc=org - bindPassword: adminpassword - rfc2307: - groupsQuery: - baseDN: "cn=admins,ou=groups,dc=example,dc=org" - 
scope: sub - derefAliases: never - filter: (objectClass=*) - pageSize: 0 - groupUIDAttribute: dn - groupNameAttributes: [ cn ] - groupMembershipAttributes: [ member ] - usersQuery: - baseDN: "ou=users,dc=example,dc=org" - scope: sub - derefAliases: never - pageSize: 0 - userUIDAttribute: dn - userNameAttributes: [ mail ] - tolerateMemberNotFoundErrors: true - tolerateMemberOutOfScopeErrors: true + kind: LDAPSyncConfig + apiVersion: v1 + url: ldap://localhost:1390 + insecure: true + bindDN: cn=admin,dc=example,dc=org + bindPassword: adminpassword + rfc2307: + groupsQuery: + baseDN: "cn=admins,ou=groups,dc=example,dc=org" + scope: sub + derefAliases: never + filter: (objectClass=*) + pageSize: 0 + groupUIDAttribute: dn + groupNameAttributes: [cn] + groupMembershipAttributes: [member] + usersQuery: + baseDN: "ou=users,dc=example,dc=org" + scope: sub + derefAliases: never + pageSize: 0 + userUIDAttribute: dn + userNameAttributes: [mail] + tolerateMemberNotFoundErrors: true + tolerateMemberOutOfScopeErrors: true # Sync all groups except the ones from the deny_groups from an LDAP server - name: Sync all groups from an LDAP server using deny_groups @@ -192,20 +192,21 @@ builds: # ENDREMOVE (downstream) import copy -import traceback -from ansible_collections.kubernetes.core.plugins.module_utils.args_common import AUTH_ARG_SPEC +from ansible_collections.kubernetes.core.plugins.module_utils.args_common import ( + AUTH_ARG_SPEC, +) def argument_spec(): args = copy.deepcopy(AUTH_ARG_SPEC) args.update( dict( - state=dict(type='str', choices=['absent', 'present'], default='present'), - type=dict(type='str', choices=['ldap', 'openshift'], default='ldap'), - sync_config=dict(type='dict', aliases=['config', 'src'], required=True), - deny_groups=dict(type='list', elements='str', default=[]), - allow_groups=dict(type='list', elements='str', default=[]), + state=dict(type="str", choices=["absent", "present"], default="present"), + type=dict(type="str", choices=["ldap", "openshift"], default="ldap"), + sync_config=dict(type="dict", aliases=["config", "src"], required=True), + deny_groups=dict(type="list", elements="str", default=[]), + allow_groups=dict(type="list", elements="str", default=[]), ) ) return args @@ -213,12 +214,14 @@ def argument_spec(): def main(): from ansible_collections.community.okd.plugins.module_utils.openshift_groups import ( - OpenshiftGroupsSync + OpenshiftGroupsSync, ) - module = OpenshiftGroupsSync(argument_spec=argument_spec(), supports_check_mode=True) + module = OpenshiftGroupsSync( + argument_spec=argument_spec(), supports_check_mode=True + ) module.run_module() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/okd/plugins/modules/openshift_adm_migrate_template_instances.py b/ansible_collections/community/okd/plugins/modules/openshift_adm_migrate_template_instances.py index 05d5563cd..ba3039d54 100644 --- a/ansible_collections/community/okd/plugins/modules/openshift_adm_migrate_template_instances.py +++ b/ansible_collections/community/okd/plugins/modules/openshift_adm_migrate_template_instances.py @@ -31,14 +31,14 @@ requirements: """ EXAMPLES = r""" - - name: Migrate TemplateInstances in namespace=test - community.okd.openshift_adm_migrate_template_instances: - namespace: test - register: _result - - - name: Migrate TemplateInstances in all namespaces - community.okd.openshift_adm_migrate_template_instances: - register: _result +- name: Migrate TemplateInstances in namespace=test + 
community.okd.openshift_adm_migrate_template_instances: + namespace: test + register: _result + +- name: Migrate TemplateInstances in all namespaces + community.okd.openshift_adm_migrate_template_instances: + register: _result """ RETURN = r""" @@ -235,7 +235,9 @@ result: from ansible.module_utils._text import to_native -from ansible_collections.community.okd.plugins.module_utils.openshift_common import AnsibleOpenshiftModule +from ansible_collections.community.okd.plugins.module_utils.openshift_common import ( + AnsibleOpenshiftModule, +) try: from kubernetes.dynamic.exceptions import DynamicApiError @@ -339,9 +341,7 @@ class OpenShiftMigrateTemplateInstances(AnsibleOpenshiftModule): if ti_to_be_migrated: if self.check_mode: - self.exit_json( - **{"changed": True, "result": ti_to_be_migrated} - ) + self.exit_json(**{"changed": True, "result": ti_to_be_migrated}) else: for ti_elem in ti_to_be_migrated: results["result"].append( @@ -363,7 +363,9 @@ def argspec(): def main(): argument_spec = argspec() - module = OpenShiftMigrateTemplateInstances(argument_spec=argument_spec, supports_check_mode=True) + module = OpenShiftMigrateTemplateInstances( + argument_spec=argument_spec, supports_check_mode=True + ) module.run_module() diff --git a/ansible_collections/community/okd/plugins/modules/openshift_adm_prune_auth.py b/ansible_collections/community/okd/plugins/modules/openshift_adm_prune_auth.py index a9833fa50..fe0aec2c9 100644 --- a/ansible_collections/community/okd/plugins/modules/openshift_adm_prune_auth.py +++ b/ansible_collections/community/okd/plugins/modules/openshift_adm_prune_auth.py @@ -5,10 +5,11 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type # STARTREMOVE (downstream) -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: openshift_adm_prune_auth @@ -58,9 +59,9 @@ options: requirements: - python >= 3.6 - kubernetes >= 12.0.0 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Prune all roles from default namespace openshift_adm_prune_auth: resource: roles @@ -72,10 +73,10 @@ EXAMPLES = r''' namespace: testing label_selectors: - phase=production -''' +""" -RETURN = r''' +RETURN = r""" cluster_role_binding: type: list description: list of cluster role binding deleted. @@ -96,37 +97,45 @@ group: type: list description: list of Security Context Constraints deleted. 
returned: I(resource=users) -''' +""" # ENDREMOVE (downstream) import copy -from ansible_collections.kubernetes.core.plugins.module_utils.args_common import AUTH_ARG_SPEC +from ansible_collections.kubernetes.core.plugins.module_utils.args_common import ( + AUTH_ARG_SPEC, +) def argument_spec(): args = copy.deepcopy(AUTH_ARG_SPEC) args.update( dict( - resource=dict(type='str', required=True, choices=['roles', 'clusterroles', 'users', 'groups']), - namespace=dict(type='str'), - name=dict(type='str'), - label_selectors=dict(type='list', elements='str'), + resource=dict( + type="str", + required=True, + choices=["roles", "clusterroles", "users", "groups"], + ), + namespace=dict(type="str"), + name=dict(type="str"), + label_selectors=dict(type="list", elements="str"), ) ) return args def main(): - from ansible_collections.community.okd.plugins.module_utils.openshift_adm_prune_auth import ( - OpenShiftAdmPruneAuth) + OpenShiftAdmPruneAuth, + ) - module = OpenShiftAdmPruneAuth(argument_spec=argument_spec(), - mutually_exclusive=[("name", "label_selectors")], - supports_check_mode=True) + module = OpenShiftAdmPruneAuth( + argument_spec=argument_spec(), + mutually_exclusive=[("name", "label_selectors")], + supports_check_mode=True, + ) module.run_module() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/okd/plugins/modules/openshift_adm_prune_builds.py b/ansible_collections/community/okd/plugins/modules/openshift_adm_prune_builds.py index b0b831e6f..ffc86f16e 100644 --- a/ansible_collections/community/okd/plugins/modules/openshift_adm_prune_builds.py +++ b/ansible_collections/community/okd/plugins/modules/openshift_adm_prune_builds.py @@ -5,10 +5,11 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type # STARTREMOVE (downstream) -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: openshift_adm_prune_builds @@ -45,14 +46,14 @@ options: requirements: - python >= 3.6 - kubernetes >= 12.0.0 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Run deleting older completed and failed builds and also including # all builds whose associated BuildConfig no longer exists - name: Run delete orphan Builds community.okd.openshift_adm_prune_builds: - orphans: True + orphans: true # Run deleting older completed and failed builds keep younger than 2hours - name: Run delete builds, keep younger than 2h @@ -63,9 +64,9 @@ EXAMPLES = r''' - name: Run delete builds from namespace community.okd.openshift_adm_prune_builds: namespace: testing_namespace -''' +""" -RETURN = r''' +RETURN = r""" builds: description: - The builds that were deleted @@ -92,33 +93,38 @@ builds: description: Current status details for the object. 
returned: success type: dict -''' +""" # ENDREMOVE (downstream) import copy -from ansible_collections.kubernetes.core.plugins.module_utils.args_common import AUTH_ARG_SPEC +from ansible_collections.kubernetes.core.plugins.module_utils.args_common import ( + AUTH_ARG_SPEC, +) def argument_spec(): args = copy.deepcopy(AUTH_ARG_SPEC) args.update( dict( - namespace=dict(type='str'), - keep_younger_than=dict(type='int'), - orphans=dict(type='bool', default=False), + namespace=dict(type="str"), + keep_younger_than=dict(type="int"), + orphans=dict(type="bool", default=False), ) ) return args def main(): + from ansible_collections.community.okd.plugins.module_utils.openshift_builds import ( + OpenShiftPruneBuilds, + ) - from ansible_collections.community.okd.plugins.module_utils.openshift_builds import OpenShiftPruneBuilds - - module = OpenShiftPruneBuilds(argument_spec=argument_spec(), supports_check_mode=True) + module = OpenShiftPruneBuilds( + argument_spec=argument_spec(), supports_check_mode=True + ) module.run_module() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/okd/plugins/modules/openshift_adm_prune_deployments.py b/ansible_collections/community/okd/plugins/modules/openshift_adm_prune_deployments.py index bdef18460..963e3c94c 100644 --- a/ansible_collections/community/okd/plugins/modules/openshift_adm_prune_deployments.py +++ b/ansible_collections/community/okd/plugins/modules/openshift_adm_prune_deployments.py @@ -5,10 +5,11 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type # STARTREMOVE (downstream) -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: openshift_adm_prune_deployments @@ -45,32 +46,34 @@ options: requirements: - python >= 3.6 - kubernetes >= 12.0.0 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Prune Deployments from testing namespace community.okd.openshift_adm_prune_deployments: namespace: testing - name: Prune orphan deployments, keep younger than 2 hours community.okd.openshift_adm_prune_deployments: - orphans: True + orphans: true keep_younger_than: 120 -''' +""" -RETURN = r''' +RETURN = r""" replication_controllers: type: list description: list of replication controller candidates for pruning.
returned: always -''' +""" # ENDREMOVE (downstream) import copy try: - from ansible_collections.kubernetes.core.plugins.module_utils.args_common import AUTH_ARG_SPEC + from ansible_collections.kubernetes.core.plugins.module_utils.args_common import ( + AUTH_ARG_SPEC, + ) except ImportError as e: pass @@ -79,22 +82,28 @@ def argument_spec(): args = copy.deepcopy(AUTH_ARG_SPEC) args.update( dict( - namespace=dict(type='str',), - keep_younger_than=dict(type='int',), - orphans=dict(type='bool', default=False), + namespace=dict( + type="str", + ), + keep_younger_than=dict( + type="int", + ), + orphans=dict(type="bool", default=False), ) ) return args def main(): - from ansible_collections.community.okd.plugins.module_utils.openshift_adm_prune_deployments import ( - OpenShiftAdmPruneDeployment) + OpenShiftAdmPruneDeployment, + ) - module = OpenShiftAdmPruneDeployment(argument_spec=argument_spec(), supports_check_mode=True) + module = OpenShiftAdmPruneDeployment( + argument_spec=argument_spec(), supports_check_mode=True + ) module.run_module() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/okd/plugins/modules/openshift_adm_prune_images.py b/ansible_collections/community/okd/plugins/modules/openshift_adm_prune_images.py index d470fa871..41bd5c4f2 100644 --- a/ansible_collections/community/okd/plugins/modules/openshift_adm_prune_images.py +++ b/ansible_collections/community/okd/plugins/modules/openshift_adm_prune_images.py @@ -5,10 +5,11 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type # STARTREMOVE (downstream) -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: openshift_adm_prune_images @@ -84,9 +85,9 @@ requirements: - python >= 3.6 - kubernetes >= 12.0.0 - docker-image-py -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Prune if only images and their referrers were more than an hour old - name: Prune image with referrer been more than an hour old community.okd.openshift_adm_prune_images: @@ -102,10 +103,10 @@ EXAMPLES = r''' community.okd.openshift_adm_prune_images: registry_url: http://registry.example.org registry_validate_certs: false -''' +""" -RETURN = r''' +RETURN = r""" updated_image_streams: description: - The images streams updated. @@ -275,41 +276,44 @@ deleted_images: }, ... 
] -''' +""" # ENDREMOVE (downstream) import copy -from ansible_collections.kubernetes.core.plugins.module_utils.args_common import AUTH_ARG_SPEC +from ansible_collections.kubernetes.core.plugins.module_utils.args_common import ( + AUTH_ARG_SPEC, +) def argument_spec(): args = copy.deepcopy(AUTH_ARG_SPEC) args.update( dict( - namespace=dict(type='str'), - all_images=dict(type='bool', default=True), - keep_younger_than=dict(type='int'), - prune_over_size_limit=dict(type='bool', default=False), - registry_url=dict(type='str'), - registry_validate_certs=dict(type='bool'), - registry_ca_cert=dict(type='path'), - prune_registry=dict(type='bool', default=True), - ignore_invalid_refs=dict(type='bool', default=False), + namespace=dict(type="str"), + all_images=dict(type="bool", default=True), + keep_younger_than=dict(type="int"), + prune_over_size_limit=dict(type="bool", default=False), + registry_url=dict(type="str"), + registry_validate_certs=dict(type="bool"), + registry_ca_cert=dict(type="path"), + prune_registry=dict(type="bool", default=True), + ignore_invalid_refs=dict(type="bool", default=False), ) ) return args def main(): - from ansible_collections.community.okd.plugins.module_utils.openshift_adm_prune_images import ( - OpenShiftAdmPruneImages + OpenShiftAdmPruneImages, ) - module = OpenShiftAdmPruneImages(argument_spec=argument_spec(), supports_check_mode=True) + module = OpenShiftAdmPruneImages( + argument_spec=argument_spec(), supports_check_mode=True + ) module.run_module() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/okd/plugins/modules/openshift_auth.py b/ansible_collections/community/okd/plugins/modules/openshift_auth.py index 422018cc5..4fdb16965 100644 --- a/ansible_collections/community/okd/plugins/modules/openshift_auth.py +++ b/ansible_collections/community/okd/plugins/modules/openshift_auth.py @@ -5,9 +5,10 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: openshift_auth @@ -74,46 +75,49 @@ requirements: - urllib3 - requests - requests-oauthlib -''' +""" -EXAMPLES = r''' -- hosts: localhost +EXAMPLES = r""" +- name: Example Playbook + hosts: localhost module_defaults: group/community.okd.okd: host: https://k8s.example.com/ ca_cert: ca.pem tasks: - - block: - # It's good practice to store login credentials in a secure vault and not - # directly in playbooks. - - include_vars: openshift_passwords.yml - - - name: Log in (obtain access token) - community.okd.openshift_auth: - username: admin - password: "{{ openshift_admin_password }}" - register: openshift_auth_results - - # Previous task provides the token/api_key, while all other parameters - # are taken from module_defaults - - name: Get a list of all pods from any namespace - kubernetes.core.k8s_info: - api_key: "{{ openshift_auth_results.openshift_auth.api_key }}" - kind: Pod - register: pod_list - - always: - - name: If login succeeded, try to log out (revoke access token) - when: openshift_auth_results.openshift_auth.api_key is defined - community.okd.openshift_auth: - state: absent - api_key: "{{ openshift_auth_results.openshift_auth.api_key }}" -''' + - name: Authenticate to OpenShift cluster and get a list of all pods from any namespace + block: + # It's good practice to store login credentials in a secure vault and not + # directly in playbooks.
+ - name: Include 'openshift_passwords.yml' + ansible.builtin.include_vars: openshift_passwords.yml + + - name: Log in (obtain access token) + community.okd.openshift_auth: + username: admin + password: "{{ openshift_admin_password }}" + register: openshift_auth_results + + # Previous task provides the token/api_key, while all other parameters + # are taken from module_defaults + - name: Get a list of all pods from any namespace + kubernetes.core.k8s_info: + api_key: "{{ openshift_auth_results.openshift_auth.api_key }}" + kind: Pod + register: pod_list + + always: + - name: If login succeeded, try to log out (revoke access token) + when: openshift_auth_results.openshift_auth.api_key is defined + community.okd.openshift_auth: + state: absent + api_key: "{{ openshift_auth_results.openshift_auth.api_key }}" +""" # Returned value names need to match k8s modules parameter names, to make it # easy to pass returned values of openshift_auth to other k8s modules. # Discussion: https://github.com/ansible/ansible/pull/50807#discussion_r248827899 -RETURN = r''' +RETURN = r""" openshift_auth: description: OpenShift authentication facts. returned: success @@ -164,7 +168,7 @@ k8s_auth: description: Username for authenticating with the API server. returned: success type: str -''' +""" import traceback @@ -179,52 +183,52 @@ import hashlib # 3rd party imports try: import requests + HAS_REQUESTS = True except ImportError: HAS_REQUESTS = False try: from requests_oauthlib import OAuth2Session + HAS_REQUESTS_OAUTH = True except ImportError: HAS_REQUESTS_OAUTH = False try: from urllib3.util import make_headers + HAS_URLLIB3 = True except ImportError: HAS_URLLIB3 = False K8S_AUTH_ARG_SPEC = { - 'state': { - 'default': 'present', - 'choices': ['present', 'absent'], - }, - 'host': {'required': True}, - 'username': {}, - 'password': {'no_log': True}, - 'ca_cert': {'type': 'path', 'aliases': ['ssl_ca_cert']}, - 'validate_certs': { - 'type': 'bool', - 'default': True, - 'aliases': ['verify_ssl'] + "state": { + "default": "present", + "choices": ["present", "absent"], }, - 'api_key': {'no_log': True}, + "host": {"required": True}, + "username": {}, + "password": {"no_log": True}, + "ca_cert": {"type": "path", "aliases": ["ssl_ca_cert"]}, + "validate_certs": {"type": "bool", "default": True, "aliases": ["verify_ssl"]}, + "api_key": {"no_log": True}, } def get_oauthaccesstoken_objectname_from_token(token_name): - """ - openshift convert the access token to an OAuthAccessToken resource name using the algorithm - https://github.com/openshift/console/blob/9f352ba49f82ad693a72d0d35709961428b43b93/pkg/server/server.go#L609-L613 + openshift convert the access token to an OAuthAccessToken resource name using the algorithm + https://github.com/openshift/console/blob/9f352ba49f82ad693a72d0d35709961428b43b93/pkg/server/server.go#L609-L613 """ sha256Prefix = "sha256~" content = token_name.strip(sha256Prefix) - b64encoded = urlsafe_b64encode(hashlib.sha256(content.encode()).digest()).rstrip(b'=') + b64encoded = urlsafe_b64encode(hashlib.sha256(content.encode()).digest()).rstrip( + b"=" + ) return sha256Prefix + b64encoded.decode("utf-8") @@ -234,42 +238,48 @@ class OpenShiftAuthModule(AnsibleModule): self, argument_spec=K8S_AUTH_ARG_SPEC, required_if=[ - ('state', 'present', ['username', 'password']), - ('state', 'absent', ['api_key']), - ] + ("state", "present", ["username", "password"]), + ("state", "absent", ["api_key"]), + ], ) if not HAS_REQUESTS: - self.fail("This module requires the python 'requests' package. 
Try `pip install requests`.") + self.fail( + "This module requires the python 'requests' package. Try `pip install requests`." + ) if not HAS_REQUESTS_OAUTH: - self.fail("This module requires the python 'requests-oauthlib' package. Try `pip install requests-oauthlib`.") + self.fail( + "This module requires the python 'requests-oauthlib' package. Try `pip install requests-oauthlib`." + ) if not HAS_URLLIB3: - self.fail("This module requires the python 'urllib3' package. Try `pip install urllib3`.") + self.fail( + "This module requires the python 'urllib3' package. Try `pip install urllib3`." + ) def execute_module(self): - state = self.params.get('state') - verify_ssl = self.params.get('validate_certs') - ssl_ca_cert = self.params.get('ca_cert') + state = self.params.get("state") + verify_ssl = self.params.get("validate_certs") + ssl_ca_cert = self.params.get("ca_cert") - self.auth_username = self.params.get('username') - self.auth_password = self.params.get('password') - self.auth_api_key = self.params.get('api_key') - self.con_host = self.params.get('host') + self.auth_username = self.params.get("username") + self.auth_password = self.params.get("password") + self.auth_api_key = self.params.get("api_key") + self.con_host = self.params.get("host") # python-requests takes either a bool or a path to a ca file as the 'verify' param if verify_ssl and ssl_ca_cert: self.con_verify_ca = ssl_ca_cert # path else: - self.con_verify_ca = verify_ssl # bool + self.con_verify_ca = verify_ssl # bool # Get needed info to access authorization APIs self.openshift_discover() changed = False result = dict() - if state == 'present': + if state == "present": new_api_key = self.openshift_login() result = dict( host=self.con_host, @@ -285,87 +295,114 @@ class OpenShiftAuthModule(AnsibleModule): self.exit_json(changed=changed, openshift_auth=result, k8s_auth=result) def openshift_discover(self): - url = urljoin(self.con_host, '.well-known/oauth-authorization-server') + url = urljoin(self.con_host, ".well-known/oauth-authorization-server") ret = requests.get(url, verify=self.con_verify_ca) if ret.status_code != 200: - self.fail_request("Couldn't find OpenShift's OAuth API", method='GET', url=url, - reason=ret.reason, status_code=ret.status_code) + self.fail_request( + "Couldn't find OpenShift's OAuth API", + method="GET", + url=url, + reason=ret.reason, + status_code=ret.status_code, + ) try: oauth_info = ret.json() - self.openshift_auth_endpoint = oauth_info['authorization_endpoint'] - self.openshift_token_endpoint = oauth_info['token_endpoint'] + self.openshift_auth_endpoint = oauth_info["authorization_endpoint"] + self.openshift_token_endpoint = oauth_info["token_endpoint"] except Exception: - self.fail_json(msg="Something went wrong discovering OpenShift OAuth details.", - exception=traceback.format_exc()) + self.fail_json( + msg="Something went wrong discovering OpenShift OAuth details.", + exception=traceback.format_exc(), + ) def openshift_login(self): - os_oauth = OAuth2Session(client_id='openshift-challenging-client') - authorization_url, state = os_oauth.authorization_url(self.openshift_auth_endpoint, - state="1", code_challenge_method='S256') - auth_headers = make_headers(basic_auth='{0}:{1}'.format(self.auth_username, self.auth_password)) + os_oauth = OAuth2Session(client_id="openshift-challenging-client") + authorization_url, state = os_oauth.authorization_url( + self.openshift_auth_endpoint, state="1", code_challenge_method="S256" + ) + auth_headers = make_headers( + 
basic_auth="{0}:{1}".format(self.auth_username, self.auth_password) + ) # Request authorization code using basic auth credentials ret = os_oauth.get( authorization_url, - headers={'X-Csrf-Token': state, 'authorization': auth_headers.get('authorization')}, + headers={ + "X-Csrf-Token": state, + "authorization": auth_headers.get("authorization"), + }, verify=self.con_verify_ca, - allow_redirects=False + allow_redirects=False, ) if ret.status_code != 302: - self.fail_request("Authorization failed.", method='GET', url=authorization_url, - reason=ret.reason, status_code=ret.status_code) + self.fail_request( + "Authorization failed.", + method="GET", + url=authorization_url, + reason=ret.reason, + status_code=ret.status_code, + ) # In here we have `code` and `state`, I think `code` is the important one qwargs = {} - for k, v in parse_qs(urlparse(ret.headers['Location']).query).items(): + for k, v in parse_qs(urlparse(ret.headers["Location"]).query).items(): qwargs[k] = v[0] - qwargs['grant_type'] = 'authorization_code' + qwargs["grant_type"] = "authorization_code" # Using authorization code given to us in the Location header of the previous request, request a token ret = os_oauth.post( self.openshift_token_endpoint, headers={ - 'Accept': 'application/json', - 'Content-Type': 'application/x-www-form-urlencoded', + "Accept": "application/json", + "Content-Type": "application/x-www-form-urlencoded", # This is just base64 encoded 'openshift-challenging-client:' - 'Authorization': 'Basic b3BlbnNoaWZ0LWNoYWxsZW5naW5nLWNsaWVudDo=' + "Authorization": "Basic b3BlbnNoaWZ0LWNoYWxsZW5naW5nLWNsaWVudDo=", }, data=urlencode(qwargs), - verify=self.con_verify_ca + verify=self.con_verify_ca, ) if ret.status_code != 200: - self.fail_request("Failed to obtain an authorization token.", method='POST', - url=self.openshift_token_endpoint, - reason=ret.reason, status_code=ret.status_code) + self.fail_request( + "Failed to obtain an authorization token.", + method="POST", + url=self.openshift_token_endpoint, + reason=ret.reason, + status_code=ret.status_code, + ) - return ret.json()['access_token'] + return ret.json()["access_token"] def openshift_logout(self): - name = get_oauthaccesstoken_objectname_from_token(self.auth_api_key) headers = { - 'Accept': 'application/json', - 'Content-Type': 'application/json', - 'Authorization': "Bearer {0}".format(self.auth_api_key) + "Accept": "application/json", + "Content-Type": "application/json", + "Authorization": "Bearer {0}".format(self.auth_api_key), } - url = "{0}/apis/oauth.openshift.io/v1/useroauthaccesstokens/{1}".format(self.con_host, name) + url = "{0}/apis/oauth.openshift.io/v1/useroauthaccesstokens/{1}".format( + self.con_host, name + ) json = { "apiVersion": "oauth.openshift.io/v1", "kind": "DeleteOptions", - "gracePeriodSeconds": 0 + "gracePeriodSeconds": 0, } - ret = requests.delete(url, json=json, verify=self.con_verify_ca, headers=headers) + ret = requests.delete( + url, json=json, verify=self.con_verify_ca, headers=headers + ) if ret.status_code != 200: self.fail_json( - msg="Couldn't delete user oauth access token '{0}' due to: {1}".format(name, ret.json().get("message")), - status_code=ret.status_code + msg="Couldn't delete user oauth access token '{0}' due to: {1}".format( + name, ret.json().get("message") + ), + status_code=ret.status_code, ) return True @@ -376,7 +413,7 @@ class OpenShiftAuthModule(AnsibleModule): def fail_request(self, msg, **kwargs): req_info = {} for k, v in kwargs.items(): - req_info['req_' + k] = v + req_info["req_" + k] = v 
self.fail_json(msg=msg, **req_info) @@ -388,5 +425,5 @@ def main(): module.fail_json(msg=str(e), exception=traceback.format_exc()) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/okd/plugins/modules/openshift_build.py b/ansible_collections/community/okd/plugins/modules/openshift_build.py index 1259a102c..b397e237c 100644 --- a/ansible_collections/community/okd/plugins/modules/openshift_build.py +++ b/ansible_collections/community/okd/plugins/modules/openshift_build.py @@ -5,10 +5,11 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type # STARTREMOVE (downstream) -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: openshift_build @@ -134,9 +135,9 @@ options: requirements: - python >= 3.6 - kubernetes >= 12.0.0 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Starts build from build config default/hello-world - name: Starts build from build config community.okd.openshift_build: @@ -171,9 +172,9 @@ EXAMPLES = r''' build_phases: - New state: cancelled -''' +""" -RETURN = r''' +RETURN = r""" builds: description: - The builds that were started/cancelled. @@ -200,37 +201,47 @@ builds: description: Current status details for the object. returned: success type: dict -''' +""" # ENDREMOVE (downstream) import copy -from ansible_collections.kubernetes.core.plugins.module_utils.args_common import AUTH_ARG_SPEC +from ansible_collections.kubernetes.core.plugins.module_utils.args_common import ( + AUTH_ARG_SPEC, +) def argument_spec(): args = copy.deepcopy(AUTH_ARG_SPEC) args_options = dict( - name=dict(type='str', required=True), - value=dict(type='str', required=True) + name=dict(type="str", required=True), value=dict(type="str", required=True) ) args.update( dict( - state=dict(type='str', choices=['started', 'cancelled', 'restarted'], default="started"), - build_args=dict(type='list', elements='dict', options=args_options), - commit=dict(type='str'), - env_vars=dict(type='list', elements='dict', options=args_options), - build_name=dict(type='str'), - build_config_name=dict(type='str'), - namespace=dict(type='str', required=True), - incremental=dict(type='bool'), - no_cache=dict(type='bool'), - wait=dict(type='bool', default=False), - wait_sleep=dict(type='int', default=5), - wait_timeout=dict(type='int', default=120), - build_phases=dict(type='list', elements='str', default=[], choices=["New", "Pending", "Running"]), + state=dict( + type="str", + choices=["started", "cancelled", "restarted"], + default="started", + ), + build_args=dict(type="list", elements="dict", options=args_options), + commit=dict(type="str"), + env_vars=dict(type="list", elements="dict", options=args_options), + build_name=dict(type="str"), + build_config_name=dict(type="str"), + namespace=dict(type="str", required=True), + incremental=dict(type="bool"), + no_cache=dict(type="bool"), + wait=dict(type="bool", default=False), + wait_sleep=dict(type="int", default=5), + wait_timeout=dict(type="int", default=120), + build_phases=dict( + type="list", + elements="str", + default=[], + choices=["New", "Pending", "Running"], + ), ) ) return args @@ -238,23 +249,24 @@ def argument_spec(): def main(): mutually_exclusive = [ - ('build_name', 'build_config_name'), + ("build_name", "build_config_name"), ] from ansible_collections.community.okd.plugins.module_utils.openshift_builds import ( - OpenShiftBuilds + OpenShiftBuilds, ) + module = OpenShiftBuilds( 
argument_spec=argument_spec(), mutually_exclusive=mutually_exclusive, required_one_of=[ [ - 'build_name', - 'build_config_name', + "build_name", + "build_config_name", ] ], ) module.run_module() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/okd/plugins/modules/openshift_import_image.py b/ansible_collections/community/okd/plugins/modules/openshift_import_image.py index df0588cf4..e5f371b67 100644 --- a/ansible_collections/community/okd/plugins/modules/openshift_import_image.py +++ b/ansible_collections/community/okd/plugins/modules/openshift_import_image.py @@ -5,10 +5,11 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type # STARTREMOVE (downstream) -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: openshift_import_image @@ -75,9 +76,9 @@ requirements: - python >= 3.6 - kubernetes >= 12.0.0 - docker-image-py -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Import tag latest into a new image stream. - name: Import tag latest into new image stream community.okd.openshift_import_image: @@ -122,10 +123,10 @@ EXAMPLES = r''' - mystream3 source: registry.io/repo/image:latest all: true -''' +""" -RETURN = r''' +RETURN = r""" result: description: - List with all ImageStreamImport that have been created. @@ -153,42 +154,44 @@ result: description: Current status details for the object. returned: success type: dict -''' +""" # ENDREMOVE (downstream) import copy -from ansible_collections.kubernetes.core.plugins.module_utils.args_common import AUTH_ARG_SPEC +from ansible_collections.kubernetes.core.plugins.module_utils.args_common import ( + AUTH_ARG_SPEC, +) def argument_spec(): args = copy.deepcopy(AUTH_ARG_SPEC) args.update( dict( - namespace=dict(type='str', required=True), - name=dict(type='raw', required=True), - all=dict(type='bool', default=False), - validate_registry_certs=dict(type='bool'), - reference_policy=dict(type='str', choices=["source", "local"], default="source"), - scheduled=dict(type='bool', default=False), - source=dict(type='str'), + namespace=dict(type="str", required=True), + name=dict(type="raw", required=True), + all=dict(type="bool", default=False), + validate_registry_certs=dict(type="bool"), + reference_policy=dict( + type="str", choices=["source", "local"], default="source" + ), + scheduled=dict(type="bool", default=False), + source=dict(type="str"), ) ) return args def main(): - from ansible_collections.community.okd.plugins.module_utils.openshift_import_image import ( - OpenShiftImportImage + OpenShiftImportImage, ) module = OpenShiftImportImage( - argument_spec=argument_spec(), - supports_check_mode=True + argument_spec=argument_spec(), supports_check_mode=True ) module.run_module() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/okd/plugins/modules/openshift_process.py b/ansible_collections/community/okd/plugins/modules/openshift_process.py index fb00ffbba..01b1bda0f 100644 --- a/ansible_collections/community/okd/plugins/modules/openshift_process.py +++ b/ansible_collections/community/okd/plugins/modules/openshift_process.py @@ -2,13 +2,14 @@ # -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function + __metaclass__ = type # Copyright (c) 2020-2021, Red Hat # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # STARTREMOVE (downstream) -DOCUMENTATION = 
r''' +DOCUMENTATION = r""" module: openshift_process short_description: Process an OpenShift template.openshift.io/v1 Template @@ -49,6 +50,7 @@ options: description: - The namespace that resources should be created, updated, or deleted in. - Only used when I(state) is present or absent. + type: str parameters: description: - 'A set of key: value pairs that will be used to set/override values in the Template.' @@ -70,9 +72,9 @@ options: type: str default: rendered choices: [ absent, present, rendered ] -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Process a template in the cluster community.okd.openshift_process: name: nginx-example @@ -87,8 +89,8 @@ EXAMPLES = r''' community.okd.k8s: namespace: default definition: '{{ item }}' - wait: yes - apply: yes + wait: true + apply: true loop: '{{ result.resources }}' - name: Process a template with parameters from an env file and create the resources @@ -98,7 +100,7 @@ EXAMPLES = r''' namespace_target: default parameter_file: 'files/nginx.env' state: present - wait: yes + wait: true - name: Process a local template and create the resources community.okd.openshift_process: @@ -113,10 +115,10 @@ EXAMPLES = r''' parameter_file: files/example.env namespace_target: default state: absent - wait: yes -''' + wait: true +""" -RETURN = r''' +RETURN = r""" result: description: - The created, patched, or otherwise present object. Will be empty in the case of a deletion. @@ -200,11 +202,13 @@ resources: conditions: type: complex description: Array of status conditions for the object. Not guaranteed to be present -''' +""" # ENDREMOVE (downstream) from ansible_collections.kubernetes.core.plugins.module_utils.args_common import ( - AUTH_ARG_SPEC, RESOURCE_ARG_SPEC, WAIT_ARG_SPEC + AUTH_ARG_SPEC, + RESOURCE_ARG_SPEC, + WAIT_ARG_SPEC, ) @@ -213,24 +217,26 @@ def argspec(): argument_spec.update(AUTH_ARG_SPEC) argument_spec.update(WAIT_ARG_SPEC) argument_spec.update(RESOURCE_ARG_SPEC) - argument_spec['state'] = dict(type='str', default='rendered', choices=['present', 'absent', 'rendered']) - argument_spec['namespace'] = dict(type='str') - argument_spec['namespace_target'] = dict(type='str') - argument_spec['parameters'] = dict(type='dict') - argument_spec['name'] = dict(type='str') - argument_spec['parameter_file'] = dict(type='str') + argument_spec["state"] = dict( + type="str", default="rendered", choices=["present", "absent", "rendered"] + ) + argument_spec["namespace"] = dict(type="str") + argument_spec["namespace_target"] = dict(type="str") + argument_spec["parameters"] = dict(type="dict") + argument_spec["name"] = dict(type="str") + argument_spec["parameter_file"] = dict(type="str") return argument_spec def main(): - from ansible_collections.community.okd.plugins.module_utils.openshift_process import ( - OpenShiftProcess) + OpenShiftProcess, + ) module = OpenShiftProcess(argument_spec=argspec(), supports_check_mode=True) module.run_module() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/okd/plugins/modules/openshift_registry_info.py b/ansible_collections/community/okd/plugins/modules/openshift_registry_info.py index a455ac50b..2693d2534 100644 --- a/ansible_collections/community/okd/plugins/modules/openshift_registry_info.py +++ b/ansible_collections/community/okd/plugins/modules/openshift_registry_info.py @@ -5,10 +5,11 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = 
type # STARTREMOVE (downstream) -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: openshift_registry_info @@ -40,9 +41,9 @@ requirements: - python >= 3.6 - kubernetes >= 12.0.0 - docker-image-py -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Get registry information - name: Read integrated registry information community.okd.openshift_registry_info: @@ -50,11 +51,11 @@ EXAMPLES = r''' # Read registry integrated information and attempt to contact using local client. - name: Attempt to contact integrated registry using local client community.okd.openshift_registry_info: - check: yes -''' + check: true +""" -RETURN = r''' +RETURN = r""" internal_hostname: description: - The internal registry hostname. @@ -79,36 +80,30 @@ check: description: message describing the ping operation. returned: always type: str -''' +""" # ENDREMOVE (downstream) import copy -from ansible_collections.kubernetes.core.plugins.module_utils.args_common import AUTH_ARG_SPEC +from ansible_collections.kubernetes.core.plugins.module_utils.args_common import ( + AUTH_ARG_SPEC, +) def argument_spec(): args = copy.deepcopy(AUTH_ARG_SPEC) - args.update( - dict( - check=dict(type='bool', default=False) - ) - ) + args.update(dict(check=dict(type="bool", default=False))) return args def main(): - from ansible_collections.community.okd.plugins.module_utils.openshift_registry import ( - OpenShiftRegistry + OpenShiftRegistry, ) - module = OpenShiftRegistry( - argument_spec=argument_spec(), - supports_check_mode=True - ) + module = OpenShiftRegistry(argument_spec=argument_spec(), supports_check_mode=True) module.run_module() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/okd/plugins/modules/openshift_route.py b/ansible_collections/community/okd/plugins/modules/openshift_route.py index e452fc534..ea73db767 100644 --- a/ansible_collections/community/okd/plugins/modules/openshift_route.py +++ b/ansible_collections/community/okd/plugins/modules/openshift_route.py @@ -9,7 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type # STARTREMOVE (downstream) -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: openshift_route short_description: Expose a Service as an OpenShift Route. @@ -133,9 +133,9 @@ options: - insecure default: insecure type: str -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create hello-world deployment community.okd.k8s: definition: @@ -155,10 +155,10 @@ EXAMPLES = r''' app: hello-kubernetes spec: containers: - - name: hello-kubernetes - image: paulbouwer/hello-kubernetes:1.8 - ports: - - containerPort: 8080 + - name: hello-kubernetes + image: paulbouwer/hello-kubernetes:1.8 + ports: + - containerPort: 8080 - name: Create Service for the hello-world deployment community.okd.k8s: @@ -170,8 +170,8 @@ EXAMPLES = r''' namespace: default spec: ports: - - port: 80 - targetPort: 8080 + - port: 80 + targetPort: 8080 selector: app: hello-kubernetes @@ -183,9 +183,9 @@ EXAMPLES = r''' annotations: haproxy.router.openshift.io/balance: roundrobin register: route -''' +""" -RETURN = r''' +RETURN = r""" result: description: - The Route object that was created or updated. Will be empty in the case of deletion. 
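# For reference, the Route manifest this module assembles from its
# parameters (see the execute_module() and build_route_spec() hunks below)
# has the following minimal shape for an edge-terminated route; a sketch
# with illustrative placeholder values:
def minimal_edge_route(name, namespace, service_name, target_port, hostname=None):
    route = {
        "apiVersion": "route.openshift.io/v1",
        "kind": "Route",
        "metadata": {"name": name, "namespace": namespace},
        "spec": {
            "to": {"kind": "Service", "name": service_name},
            # targetPort may be a port name or number on the target Service.
            "port": {"targetPort": target_port},
            # Termination is capitalized in the manifest ("Edge",
            # "Passthrough", "Reencrypt"); termination "insecure" means
            # no tls stanza at all.
            "tls": {"termination": "Edge"},
        },
    }
    if hostname:
        # host is optional; the router assigns a hostname when omitted.
        route["spec"]["host"] = hostname
    return route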
@@ -303,20 +303,28 @@ duration: returned: when C(wait) is true type: int sample: 48 -''' +""" # ENDREMOVE (downstream) import copy from ansible.module_utils._text import to_native -from ansible_collections.community.okd.plugins.module_utils.openshift_common import AnsibleOpenshiftModule +from ansible_collections.community.okd.plugins.module_utils.openshift_common import ( + AnsibleOpenshiftModule, +) try: - from ansible_collections.kubernetes.core.plugins.module_utils.k8s.runner import perform_action - from ansible_collections.kubernetes.core.plugins.module_utils.k8s.waiter import Waiter + from ansible_collections.kubernetes.core.plugins.module_utils.k8s.runner import ( + perform_action, + ) + from ansible_collections.kubernetes.core.plugins.module_utils.k8s.waiter import ( + Waiter, + ) from ansible_collections.kubernetes.core.plugins.module_utils.args_common import ( - AUTH_ARG_SPEC, WAIT_ARG_SPEC, COMMON_ARG_SPEC + AUTH_ARG_SPEC, + WAIT_ARG_SPEC, + COMMON_ARG_SPEC, ) except ImportError as e: pass @@ -329,7 +337,6 @@ except ImportError: class OpenShiftRoute(AnsibleOpenshiftModule): - def __init__(self): super(OpenShiftRoute, self).__init__( argument_spec=self.argspec, @@ -339,7 +346,7 @@ class OpenShiftRoute(AnsibleOpenshiftModule): self.append_hash = False self.apply = False self.warnings = [] - self.params['merge_type'] = None + self.params["merge_type"] = None @property def argspec(self): @@ -347,80 +354,95 @@ class OpenShiftRoute(AnsibleOpenshiftModule): spec.update(copy.deepcopy(WAIT_ARG_SPEC)) spec.update(copy.deepcopy(COMMON_ARG_SPEC)) - spec['service'] = dict(type='str', aliases=['svc']) - spec['namespace'] = dict(required=True, type='str') - spec['labels'] = dict(type='dict') - spec['name'] = dict(type='str') - spec['hostname'] = dict(type='str') - spec['path'] = dict(type='str') - spec['wildcard_policy'] = dict(choices=['Subdomain'], type='str') - spec['port'] = dict(type='str') - spec['tls'] = dict(type='dict', options=dict( - ca_certificate=dict(type='str'), - certificate=dict(type='str'), - destination_ca_certificate=dict(type='str'), - key=dict(type='str', no_log=False), - insecure_policy=dict(type='str', choices=['allow', 'redirect', 'disallow'], default='disallow'), - )) - spec['termination'] = dict(choices=['edge', 'passthrough', 'reencrypt', 'insecure'], default='insecure') - spec['annotations'] = dict(type='dict') + spec["service"] = dict(type="str", aliases=["svc"]) + spec["namespace"] = dict(required=True, type="str") + spec["labels"] = dict(type="dict") + spec["name"] = dict(type="str") + spec["hostname"] = dict(type="str") + spec["path"] = dict(type="str") + spec["wildcard_policy"] = dict(choices=["Subdomain"], type="str") + spec["port"] = dict(type="str") + spec["tls"] = dict( + type="dict", + options=dict( + ca_certificate=dict(type="str"), + certificate=dict(type="str"), + destination_ca_certificate=dict(type="str"), + key=dict(type="str", no_log=False), + insecure_policy=dict( + type="str", + choices=["allow", "redirect", "disallow"], + default="disallow", + ), + ), + ) + spec["termination"] = dict( + choices=["edge", "passthrough", "reencrypt", "insecure"], default="insecure" + ) + spec["annotations"] = dict(type="dict") return spec def execute_module(self): - - service_name = self.params.get('service') - namespace = self.params['namespace'] - termination_type = self.params.get('termination') - if termination_type == 'insecure': + service_name = self.params.get("service") + namespace = self.params["namespace"] + termination_type = 
self.params.get("termination") + if termination_type == "insecure": termination_type = None - state = self.params.get('state') + state = self.params.get("state") - if state != 'absent' and not service_name: + if state != "absent" and not service_name: self.fail_json("If 'state' is not 'absent' then 'service' must be provided") # We need to do something a little wonky to wait if the user doesn't supply a custom condition - custom_wait = self.params.get('wait') and not self.params.get('wait_condition') and state != 'absent' + custom_wait = ( + self.params.get("wait") + and not self.params.get("wait_condition") + and state != "absent" + ) if custom_wait: # Don't use default wait logic in perform_action - self.params['wait'] = False - - route_name = self.params.get('name') or service_name - labels = self.params.get('labels') - hostname = self.params.get('hostname') - path = self.params.get('path') - wildcard_policy = self.params.get('wildcard_policy') - port = self.params.get('port') - annotations = self.params.get('annotations') - - if termination_type and self.params.get('tls'): - tls_ca_cert = self.params['tls'].get('ca_certificate') - tls_cert = self.params['tls'].get('certificate') - tls_dest_ca_cert = self.params['tls'].get('destination_ca_certificate') - tls_key = self.params['tls'].get('key') - tls_insecure_policy = self.params['tls'].get('insecure_policy') - if tls_insecure_policy == 'disallow': + self.params["wait"] = False + + route_name = self.params.get("name") or service_name + labels = self.params.get("labels") + hostname = self.params.get("hostname") + path = self.params.get("path") + wildcard_policy = self.params.get("wildcard_policy") + port = self.params.get("port") + annotations = self.params.get("annotations") + + if termination_type and self.params.get("tls"): + tls_ca_cert = self.params["tls"].get("ca_certificate") + tls_cert = self.params["tls"].get("certificate") + tls_dest_ca_cert = self.params["tls"].get("destination_ca_certificate") + tls_key = self.params["tls"].get("key") + tls_insecure_policy = self.params["tls"].get("insecure_policy") + if tls_insecure_policy == "disallow": tls_insecure_policy = None else: - tls_ca_cert = tls_cert = tls_dest_ca_cert = tls_key = tls_insecure_policy = None + tls_ca_cert = ( + tls_cert + ) = tls_dest_ca_cert = tls_key = tls_insecure_policy = None route = { - 'apiVersion': 'route.openshift.io/v1', - 'kind': 'Route', - 'metadata': { - 'name': route_name, - 'namespace': namespace, - 'labels': labels, + "apiVersion": "route.openshift.io/v1", + "kind": "Route", + "metadata": { + "name": route_name, + "namespace": namespace, + "labels": labels, }, - 'spec': {} + "spec": {}, } if annotations: - route['metadata']['annotations'] = annotations + route["metadata"]["annotations"] = annotations - if state != 'absent': - route['spec'] = self.build_route_spec( - service_name, namespace, + if state != "absent": + route["spec"] = self.build_route_spec( + service_name, + namespace, port=port, wildcard_policy=wildcard_policy, hostname=hostname, @@ -434,79 +456,120 @@ class OpenShiftRoute(AnsibleOpenshiftModule): ) result = perform_action(self.svc, route, self.params) - timeout = self.params.get('wait_timeout') - sleep = self.params.get('wait_sleep') + timeout = self.params.get("wait_timeout") + sleep = self.params.get("wait_sleep") if custom_wait: - v1_routes = self.find_resource('Route', 'route.openshift.io/v1', fail=True) + v1_routes = self.find_resource("Route", "route.openshift.io/v1", fail=True) waiter = Waiter(self.client, v1_routes, 
wait_predicate) - success, result['result'], result['duration'] = waiter.wait(timeout=timeout, sleep=sleep, name=route_name, namespace=namespace) + success, result["result"], result["duration"] = waiter.wait( + timeout=timeout, sleep=sleep, name=route_name, namespace=namespace + ) self.exit_json(**result) - def build_route_spec(self, service_name, namespace, port=None, wildcard_policy=None, hostname=None, path=None, termination_type=None, - tls_insecure_policy=None, tls_ca_cert=None, tls_cert=None, tls_key=None, tls_dest_ca_cert=None): - v1_services = self.find_resource('Service', 'v1', fail=True) + def build_route_spec( + self, + service_name, + namespace, + port=None, + wildcard_policy=None, + hostname=None, + path=None, + termination_type=None, + tls_insecure_policy=None, + tls_ca_cert=None, + tls_cert=None, + tls_key=None, + tls_dest_ca_cert=None, + ): + v1_services = self.find_resource("Service", "v1", fail=True) try: target_service = v1_services.get(name=service_name, namespace=namespace) except NotFoundError: if not port: - self.fail_json(msg="You need to provide the 'port' argument when exposing a non-existent service") + self.fail_json( + msg="You need to provide the 'port' argument when exposing a non-existent service" + ) target_service = None except DynamicApiError as exc: - self.fail_json(msg='Failed to retrieve service to be exposed: {0}'.format(exc.body), - error=exc.status, status=exc.status, reason=exc.reason) + self.fail_json( + msg="Failed to retrieve service to be exposed: {0}".format(exc.body), + error=exc.status, + status=exc.status, + reason=exc.reason, + ) except Exception as exc: - self.fail_json(msg='Failed to retrieve service to be exposed: {0}'.format(to_native(exc)), - error='', status='', reason='') + self.fail_json( + msg="Failed to retrieve service to be exposed: {0}".format( + to_native(exc) + ), + error="", + status="", + reason="", + ) route_spec = { - 'tls': {}, - 'to': { - 'kind': 'Service', - 'name': service_name, + "tls": {}, + "to": { + "kind": "Service", + "name": service_name, }, - 'port': { - 'targetPort': self.set_port(target_service, port), + "port": { + "targetPort": self.set_port(target_service, port), }, - 'wildcardPolicy': wildcard_policy + "wildcardPolicy": wildcard_policy, } # Want to conditionally add these so we don't overwrite what is automically added when nothing is provided if termination_type: - route_spec['tls'] = dict(termination=termination_type.capitalize()) + route_spec["tls"] = dict(termination=termination_type.capitalize()) if tls_insecure_policy: - if termination_type == 'edge': - route_spec['tls']['insecureEdgeTerminationPolicy'] = tls_insecure_policy.capitalize() - elif termination_type == 'passthrough': - if tls_insecure_policy != 'redirect': - self.fail_json("'redirect' is the only supported insecureEdgeTerminationPolicy for passthrough routes") - route_spec['tls']['insecureEdgeTerminationPolicy'] = tls_insecure_policy.capitalize() - elif termination_type == 'reencrypt': - self.fail_json("'tls.insecure_policy' is not supported with reencrypt routes") + if termination_type == "edge": + route_spec["tls"][ + "insecureEdgeTerminationPolicy" + ] = tls_insecure_policy.capitalize() + elif termination_type == "passthrough": + if tls_insecure_policy != "redirect": + self.fail_json( + "'redirect' is the only supported insecureEdgeTerminationPolicy for passthrough routes" + ) + route_spec["tls"][ + "insecureEdgeTerminationPolicy" + ] = tls_insecure_policy.capitalize() + elif termination_type == "reencrypt": + self.fail_json( + 
"'tls.insecure_policy' is not supported with reencrypt routes" + ) else: - route_spec['tls']['insecureEdgeTerminationPolicy'] = None + route_spec["tls"]["insecureEdgeTerminationPolicy"] = None if tls_ca_cert: - if termination_type == 'passthrough': - self.fail_json("'tls.ca_certificate' is not supported with passthrough routes") - route_spec['tls']['caCertificate'] = tls_ca_cert + if termination_type == "passthrough": + self.fail_json( + "'tls.ca_certificate' is not supported with passthrough routes" + ) + route_spec["tls"]["caCertificate"] = tls_ca_cert if tls_cert: - if termination_type == 'passthrough': - self.fail_json("'tls.certificate' is not supported with passthrough routes") - route_spec['tls']['certificate'] = tls_cert + if termination_type == "passthrough": + self.fail_json( + "'tls.certificate' is not supported with passthrough routes" + ) + route_spec["tls"]["certificate"] = tls_cert if tls_key: - if termination_type == 'passthrough': + if termination_type == "passthrough": self.fail_json("'tls.key' is not supported with passthrough routes") - route_spec['tls']['key'] = tls_key + route_spec["tls"]["key"] = tls_key if tls_dest_ca_cert: - if termination_type != 'reencrypt': - self.fail_json("'destination_certificate' is only valid for reencrypt routes") - route_spec['tls']['destinationCACertificate'] = tls_dest_ca_cert + if termination_type != "reencrypt": + self.fail_json( + "'destination_certificate' is only valid for reencrypt routes" + ) + route_spec["tls"]["destinationCACertificate"] = tls_dest_ca_cert else: - route_spec['tls'] = None + route_spec["tls"] = None if hostname: - route_spec['host'] = hostname + route_spec["host"] = hostname if path: - route_spec['path'] = path + route_spec["path"] = path return route_spec @@ -514,7 +577,7 @@ class OpenShiftRoute(AnsibleOpenshiftModule): if port_arg: return port_arg for p in service.spec.ports: - if p.protocol == 'TCP': + if p.protocol == "TCP": if p.name is not None: return p.name return p.targetPort @@ -525,7 +588,7 @@ def wait_predicate(route): if not (route.status and route.status.ingress): return False for ingress in route.status.ingress: - match = [x for x in ingress.conditions if x.type == 'Admitted'] + match = [x for x in ingress.conditions if x.type == "Admitted"] if not match: return False match = match[0] @@ -538,5 +601,5 @@ def main(): OpenShiftRoute().run_module() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/community/okd/requirements.yml b/ansible_collections/community/okd/requirements.yml index d7d4e6f7a..ac7b465f0 100644 --- a/ansible_collections/community/okd/requirements.yml +++ b/ansible_collections/community/okd/requirements.yml @@ -1,3 +1,4 @@ +--- collections: - name: kubernetes.core - version: '>=2.4.0' + version: '>=3.0.0' diff --git a/ansible_collections/community/okd/setup.cfg b/ansible_collections/community/okd/setup.cfg deleted file mode 100644 index 29c924b86..000000000 --- a/ansible_collections/community/okd/setup.cfg +++ /dev/null @@ -1,3 +0,0 @@ -[flake8] -max-line-length = 160 -ignore = W503,E402 diff --git a/ansible_collections/community/okd/test-requirements.txt b/ansible_collections/community/okd/test-requirements.txt index 7f234688f..99e8534cb 100644 --- a/ansible_collections/community/okd/test-requirements.txt +++ b/ansible_collections/community/okd/test-requirements.txt @@ -2,3 +2,4 @@ coverage==4.5.4 pytest pytest-xdist pytest-forked +pytest-ansible diff --git a/ansible_collections/community/okd/tests/config.yml 
b/ansible_collections/community/okd/tests/config.yml index 9e402bda7..c26ea5966 100644 --- a/ansible_collections/community/okd/tests/config.yml +++ b/ansible_collections/community/okd/tests/config.yml @@ -1,2 +1,3 @@ +--- modules: - python_requires: ">=3.6" + python_requires: ">=3.9" diff --git a/ansible_collections/community/okd/tests/sanity/ignore-2.16.txt b/ansible_collections/community/okd/tests/sanity/ignore-2.16.txt new file mode 100644 index 000000000..2fd2bdc90 --- /dev/null +++ b/ansible_collections/community/okd/tests/sanity/ignore-2.16.txt @@ -0,0 +1,3 @@ +plugins/modules/k8s.py validate-modules:parameter-type-not-in-doc +plugins/modules/k8s.py validate-modules:return-syntax-error +plugins/modules/openshift_process.py validate-modules:parameter-type-not-in-doc \ No newline at end of file diff --git a/ansible_collections/community/okd/tests/sanity/ignore-2.17.txt b/ansible_collections/community/okd/tests/sanity/ignore-2.17.txt new file mode 100644 index 000000000..2fd2bdc90 --- /dev/null +++ b/ansible_collections/community/okd/tests/sanity/ignore-2.17.txt @@ -0,0 +1,3 @@ +plugins/modules/k8s.py validate-modules:parameter-type-not-in-doc +plugins/modules/k8s.py validate-modules:return-syntax-error +plugins/modules/openshift_process.py validate-modules:parameter-type-not-in-doc \ No newline at end of file diff --git a/ansible_collections/community/okd/tests/sanity/requirements.yml b/ansible_collections/community/okd/tests/sanity/requirements.yml new file mode 100644 index 000000000..244be516f --- /dev/null +++ b/ansible_collections/community/okd/tests/sanity/requirements.yml @@ -0,0 +1,5 @@ +--- +collections: + - name: https://github.com/ansible-collections/kubernetes.core.git + type: git + version: main diff --git a/ansible_collections/community/okd/tests/unit/plugins/module_utils/test_ldap_dn.py b/ansible_collections/community/okd/tests/unit/plugins/module_utils/test_ldap_dn.py index 5835f36d7..c312f3b02 100644 --- a/ansible_collections/community/okd/tests/unit/plugins/module_utils/test_ldap_dn.py +++ b/ansible_collections/community/okd/tests/unit/plugins/module_utils/test_ldap_dn.py @@ -5,28 +5,44 @@ __metaclass__ = type from ansible_collections.community.okd.plugins.module_utils.openshift_ldap import ( openshift_equal_dn, - openshift_ancestorof_dn + openshift_ancestorof_dn, ) import pytest try: - import ldap + import ldap # pylint: disable=unused-import except ImportError: pytestmark = pytest.mark.skip("This test requires the python-ldap library") def test_equal_dn(): - - assert openshift_equal_dn("cn=unit,ou=users,dc=ansible,dc=com", "cn=unit,ou=users,dc=ansible,dc=com") - assert not openshift_equal_dn("cn=unit,ou=users,dc=ansible,dc=com", "cn=units,ou=users,dc=ansible,dc=com") - assert not openshift_equal_dn("cn=unit,ou=users,dc=ansible,dc=com", "cn=unit,ou=user,dc=ansible,dc=com") - assert not openshift_equal_dn("cn=unit,ou=users,dc=ansible,dc=com", "cn=unit,ou=users,dc=ansible,dc=org") + assert openshift_equal_dn( + "cn=unit,ou=users,dc=ansible,dc=com", "cn=unit,ou=users,dc=ansible,dc=com" + ) + assert not openshift_equal_dn( + "cn=unit,ou=users,dc=ansible,dc=com", "cn=units,ou=users,dc=ansible,dc=com" + ) + assert not openshift_equal_dn( + "cn=unit,ou=users,dc=ansible,dc=com", "cn=unit,ou=user,dc=ansible,dc=com" + ) + assert not openshift_equal_dn( + "cn=unit,ou=users,dc=ansible,dc=com", "cn=unit,ou=users,dc=ansible,dc=org" + ) def test_ancestor_of_dn(): - - assert not openshift_ancestorof_dn("cn=unit,ou=users,dc=ansible,dc=com", "cn=unit,ou=users,dc=ansible,dc=com") - 
assert not openshift_ancestorof_dn("cn=unit,ou=users,dc=ansible,dc=com", "cn=units,ou=users,dc=ansible,dc=com") - assert openshift_ancestorof_dn("ou=users,dc=ansible,dc=com", "cn=john,ou=users,dc=ansible,dc=com") - assert openshift_ancestorof_dn("ou=users,dc=ansible,dc=com", "cn=mathew,ou=users,dc=ansible,dc=com") - assert not openshift_ancestorof_dn("ou=users,dc=ansible,dc=com", "cn=mathew,ou=users,dc=ansible,dc=org") + assert not openshift_ancestorof_dn( + "cn=unit,ou=users,dc=ansible,dc=com", "cn=unit,ou=users,dc=ansible,dc=com" + ) + assert not openshift_ancestorof_dn( + "cn=unit,ou=users,dc=ansible,dc=com", "cn=units,ou=users,dc=ansible,dc=com" + ) + assert openshift_ancestorof_dn( + "ou=users,dc=ansible,dc=com", "cn=john,ou=users,dc=ansible,dc=com" + ) + assert openshift_ancestorof_dn( + "ou=users,dc=ansible,dc=com", "cn=mathew,ou=users,dc=ansible,dc=com" + ) + assert not openshift_ancestorof_dn( + "ou=users,dc=ansible,dc=com", "cn=mathew,ou=users,dc=ansible,dc=org" + ) diff --git a/ansible_collections/community/okd/tests/unit/plugins/module_utils/test_ldap_sync_config.py b/ansible_collections/community/okd/tests/unit/plugins/module_utils/test_ldap_sync_config.py index 1a8ef67e2..303b66f4b 100644 --- a/ansible_collections/community/okd/tests/unit/plugins/module_utils/test_ldap_sync_config.py +++ b/ansible_collections/community/okd/tests/unit/plugins/module_utils/test_ldap_sync_config.py @@ -9,28 +9,26 @@ from ansible_collections.community.okd.plugins.module_utils.openshift_ldap impor def test_missing_url(): - config = dict( - kind="LDAPSyncConfig", - apiVersion="v1", - insecure=True - ) + config = dict(kind="LDAPSyncConfig", apiVersion="v1", insecure=True) err = validate_ldap_sync_config(config) assert err == "url should be non empty attribute." def test_binddn_and_bindpwd_linked(): """ - one of bind_dn and bind_pwd cannot be set alone + one of bind_dn and bind_pwd cannot be set alone """ config = dict( kind="LDAPSyncConfig", apiVersion="v1", url="ldap://LDAP_SERVICE_IP:389", insecure=True, - bindDN="cn=admin,dc=example,dc=org" + bindDN="cn=admin,dc=example,dc=org", ) - credentials_error = "bindDN and bindPassword must both be specified, or both be empty." + credentials_error = ( + "bindDN and bindPassword must both be specified, or both be empty." + ) assert validate_ldap_sync_config(config) == credentials_error @@ -39,7 +37,7 @@ def test_binddn_and_bindpwd_linked(): apiVersion="v1", url="ldap://LDAP_SERVICE_IP:389", insecure=True, - bindPassword="testing1223" + bindPassword="testing1223", ) assert validate_ldap_sync_config(config) == credentials_error @@ -53,11 +51,13 @@ def test_insecure_connection(): insecure=True, ) - assert validate_ldap_sync_config(config) == "Cannot use ldaps scheme with insecure=true." + assert ( + validate_ldap_sync_config(config) + == "Cannot use ldaps scheme with insecure=true." + ) - config.update(dict( - url="ldap://LDAP_SERVICE_IP:389", - ca="path/to/ca/file" - )) + config.update(dict(url="ldap://LDAP_SERVICE_IP:389", ca="path/to/ca/file")) - assert validate_ldap_sync_config(config) == "Cannot specify a ca with insecure=true." + assert ( + validate_ldap_sync_config(config) == "Cannot specify a ca with insecure=true." 
+ ) diff --git a/ansible_collections/community/okd/tests/unit/plugins/module_utils/test_openshift_docker_image.py b/ansible_collections/community/okd/tests/unit/plugins/module_utils/test_openshift_docker_image.py index 99cf15cc5..d23f9f186 100644 --- a/ansible_collections/community/okd/tests/unit/plugins/module_utils/test_openshift_docker_image.py +++ b/ansible_collections/community/okd/tests/unit/plugins/module_utils/test_openshift_docker_image.py @@ -11,7 +11,6 @@ import pytest def test_convert_storage_to_bytes(): - data = [ ("1000", 1000), ("1000Ki", 1000 * 1024), @@ -54,46 +53,48 @@ def validate_docker_response(resp, **kwargs): def test_parse_docker_image_ref_valid_image_with_digest(): - image = "registry.access.redhat.com/ubi8/dotnet-21@sha256:f7718f5efd3436e781ee4322c92ab0c4ae63e61f5b36f1473a57874cc3522669" response, err = parse_docker_image_ref(image) assert err is None - validate_docker_response(response, - hostname="registry.access.redhat.com", - namespace="ubi8", - name="dotnet-21", - digest="sha256:f7718f5efd3436e781ee4322c92ab0c4ae63e61f5b36f1473a57874cc3522669") + validate_docker_response( + response, + hostname="registry.access.redhat.com", + namespace="ubi8", + name="dotnet-21", + digest="sha256:f7718f5efd3436e781ee4322c92ab0c4ae63e61f5b36f1473a57874cc3522669", + ) def test_parse_docker_image_ref_valid_image_with_tag_latest(): - image = "registry.access.redhat.com/ubi8/dotnet-21:latest" response, err = parse_docker_image_ref(image) assert err is None - validate_docker_response(response, - hostname="registry.access.redhat.com", - namespace="ubi8", - name="dotnet-21", - tag="latest") + validate_docker_response( + response, + hostname="registry.access.redhat.com", + namespace="ubi8", + name="dotnet-21", + tag="latest", + ) def test_parse_docker_image_ref_valid_image_with_tag_int(): - image = "registry.access.redhat.com/ubi8/dotnet-21:0.0.1" response, err = parse_docker_image_ref(image) assert err is None - validate_docker_response(response, - hostname="registry.access.redhat.com", - namespace="ubi8", - name="dotnet-21", - tag="0.0.1") + validate_docker_response( + response, + hostname="registry.access.redhat.com", + namespace="ubi8", + name="dotnet-21", + tag="0.0.1", + ) def test_parse_docker_image_ref_invalid_image(): - # The hex value of the sha256 is not valid image = "registry.access.redhat.com/dotnet-21@sha256:f7718f5efd3436e781ee4322c92ab0c4ae63e61f5b36f1473a57874cc3522" response, err = parse_docker_image_ref(image) @@ -101,7 +102,6 @@ def test_parse_docker_image_ref_invalid_image(): def test_parse_docker_image_ref_valid_image_without_hostname(): - image = "ansible:2.10.0" response, err = parse_docker_image_ref(image) assert err is None @@ -110,16 +110,18 @@ def test_parse_docker_image_ref_valid_image_without_hostname(): def test_parse_docker_image_ref_valid_image_without_hostname_and_with_digest(): - image = "ansible@sha256:f7718f5efd3436e781ee4322c92ab0c4ae63e61f5b36f1473a57874cc3522669" response, err = parse_docker_image_ref(image) assert err is None - validate_docker_response(response, name="ansible", digest="sha256:f7718f5efd3436e781ee4322c92ab0c4ae63e61f5b36f1473a57874cc3522669") + validate_docker_response( + response, + name="ansible", + digest="sha256:f7718f5efd3436e781ee4322c92ab0c4ae63e61f5b36f1473a57874cc3522669", + ) def test_parse_docker_image_ref_valid_image_with_name_only(): - image = "ansible" response, err = parse_docker_image_ref(image) assert err is None @@ -128,25 +130,27 @@ def test_parse_docker_image_ref_valid_image_with_name_only(): def 
test_parse_docker_image_ref_valid_image_without_hostname_with_namespace_and_name(): - image = "ibmcom/pause@sha256:fcaff905397ba63fd376d0c3019f1f1cb6e7506131389edbcb3d22719f1ae54d" response, err = parse_docker_image_ref(image) assert err is None - validate_docker_response(response, - name="pause", - namespace="ibmcom", - digest="sha256:fcaff905397ba63fd376d0c3019f1f1cb6e7506131389edbcb3d22719f1ae54d") + validate_docker_response( + response, + name="pause", + namespace="ibmcom", + digest="sha256:fcaff905397ba63fd376d0c3019f1f1cb6e7506131389edbcb3d22719f1ae54d", + ) def test_parse_docker_image_ref_valid_image_with_complex_namespace_name(): - image = "registry.redhat.io/jboss-webserver-5/webserver54-openjdk11-tomcat9-openshift-rhel7:1.0" response, err = parse_docker_image_ref(image) assert err is None - validate_docker_response(response, - hostname="registry.redhat.io", - name="webserver54-openjdk11-tomcat9-openshift-rhel7", - namespace="jboss-webserver-5", - tag="1.0") + validate_docker_response( + response, + hostname="registry.redhat.io", + name="webserver54-openjdk11-tomcat9-openshift-rhel7", + namespace="jboss-webserver-5", + tag="1.0", + ) diff --git a/ansible_collections/community/okd/tox.ini b/ansible_collections/community/okd/tox.ini new file mode 100644 index 000000000..57fd1d921 --- /dev/null +++ b/ansible_collections/community/okd/tox.ini @@ -0,0 +1,37 @@ +[tox] +skipsdist = True + +[testenv] +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +install_command = pip install {opts} {packages} + +[testenv:black] +deps = + black >= 23.0, < 24.0 + +commands = + black {toxinidir}/plugins {toxinidir}/tests + +[testenv:ansible-lint] +deps = + ansible-lint==6.21.0 +changedir = {toxinidir} +commands = + ansible-lint + +[testenv:linters] +deps = + flake8 + {[testenv:black]deps} + +commands = + black -v --check --diff {toxinidir}/plugins {toxinidir}/tests + flake8 {toxinidir} + +[flake8] +# E123, E125 skipped as they are invalid PEP-8. +exclude = .git,.tox,tests/output +ignore = E123,E125,E203,E402,E501,E741,F401,F811,F841,W503 +max-line-length = 160 +builtins = _ -- cgit v1.2.3
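For reference, the image-reference splitting exercised by the docker-image tests
above can be approximated in a few lines. This is a simplified sketch (the
collection's real parser in module_utils/openshift_docker_image.py performs
stricter validation, e.g. of the digest's hex payload); it does not handle
registry hostnames that carry a port:

def split_image_ref(image):
    """Rough decomposition of IMAGE[:TAG][@DIGEST] into its components."""
    hostname = namespace = tag = digest = None
    rest = image
    if "@" in rest:
        rest, digest = rest.split("@", 1)
    elif ":" in rest.rsplit("/", 1)[-1]:
        rest, tag = rest.rsplit(":", 1)
    parts = rest.split("/")
    if len(parts) == 3:
        hostname, namespace, name = parts
    elif len(parts) == 2:
        namespace, name = parts
    else:
        name = parts[0]
    return dict(
        hostname=hostname, namespace=namespace, name=name, tag=tag, digest=digest
    )

# split_image_ref("registry.access.redhat.com/ubi8/dotnet-21:latest")
# -> hostname "registry.access.redhat.com", namespace "ubi8",
#    name "dotnet-21", tag "latest"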